import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
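
# Illustrative usage (a minimal sketch, not part of the upstream test utilities):
# fit the toy regression model on the synthetic dataset with plain SGD. The
# optimizer and loss choices here are assumptions made for this demo only.
if __name__ == "__main__":
    dataset = RegressionDataset(a=2, b=3, length=64, seed=42)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    model = RegressionModel(a=0, b=0)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for epoch in range(10):
        for batch in loader:
            optimizer.zero_grad()
            loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
            loss.backward()
            optimizer.step()
    print(f"learned a={model.a.item():.2f}, b={model.b.item():.2f} (target: a=2, b=3)")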
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """
    Build an entangled (GHZ) state on `qubits` qubits and return the
    measurement counts over 1000 shots.
    """
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}')
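
# For reference: the circuit above prepares a GHZ state, so a 3-qubit run
# concentrates the 1000 shots on the all-zeros and all-ones bitstrings,
# e.g. roughly {'000': ~500, '111': ~500}; exact counts vary per run since
# measurement is probabilistic.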
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is kept in `asdict` output so the template serializes unambiguously
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
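
# Illustrative usage (a doctest-style sketch; the concrete features below are
# assumptions for the example, not part of this module):
#
#   >>> from datasets import ClassLabel, Features, Value
#   >>> features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   >>> task = TextClassification(text_column="text", label_column="labels")
#   >>> task = task.align_with_features(features)
#   >>> task.column_mapping
#   {'text': 'text', 'labels': 'labels'}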
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum subsequence sum of `nums` via a Kadane-style scan."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # either extend the current run or restart it at `num`
        ans = max(ans, ans + num, num)

    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to string."""
    return ".".join(str(v) for v in version_tuple)
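
# Illustrative usage (a minimal sketch): versions compare by their
# (major, minor, patch) tuple, and bare strings are coerced via _validate_operand.
#
#   >>> Version("1.0.0") == "1.0.0"
#   True
#   >>> Version("1.2.0") < Version("1.10.0")
#   True
#   >>> _str_to_version_tuple("2.3.4")
#   (2, 3, 4)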
import itertools
import math
def is_prime(number: int) -> bool:
    """Check if a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, None))
if __name__ == "__main__":
print(F"""{solution() = }""")
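
# Quick sanity checks (illustrative): the 1st prime is 2 and the 6th is 13.
#
#   >>> solution(1)
#   2
#   >>> solution(6)
#   13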
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []

    # stem
    # fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
# We will verify our conversion on this image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original DETA weights into the HuggingFace DETA structure.
    """
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # print original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
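
# Example invocation (a sketch; the script filename is an assumption, since the
# file itself does not fix one):
#
#   python convert_deta_swin_to_hf.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub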
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod.
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds.
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # (in this case, self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps

        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
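
# Illustrative usage (a minimal sketch; `unet` and `clean_latents` are
# placeholders for a noise-prediction model and a starting sample, neither of
# which is defined in this file). DDIM inversion walks a clean latent back
# toward noise:
#
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   latents = clean_latents
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample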
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Zero-shot object detection pipeline: predicts bounding boxes for user-supplied candidate labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
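
# Illustrative usage (a minimal sketch): this class is normally reached through
# the `pipeline` factory rather than instantiated directly. The checkpoint name
# below is an assumption for the example.
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )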
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Returns the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
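
# Illustrative usage (a minimal sketch, assuming this file is exposed as a
# repository's hubconf.py; the repo and checkpoint names are assumptions):
#
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-cased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-cased")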
def remove_digit(num: int) -> int:
    """
    Returns the biggest possible result that can be achieved
    by removing one digit from the given number.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("doctest").testmod()
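
# Worked example: for 152 the candidates after removing one digit are 52, 12
# and 15, so remove_digit(152) == 52; the sign is dropped via abs(), so
# remove_digit(-152) == 52 as well.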
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=2 , __a=24 , __a=16 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=None , __a=2 , __a=2 , ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = patch_size
_UpperCamelCase = max_length
_UpperCamelCase = num_mel_bins
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = frequency_stride
_UpperCamelCase = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_UpperCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_UpperCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1
_UpperCamelCase = frequency_out_dimension * time_out_dimension
_UpperCamelCase = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # note: the parameter is spelled `pipeline_test_casse_name` upstream, so the
        # spelling is kept here in case the mixin passes it by keyword
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
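# Distilled from the integration test above, a minimal inference sketch (the
# checkpoint id and the 527 AudioSet classes come from the test itself; the
# 16 kHz sampling rate of the input waveform is an assumption):
#
#   feature_extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#   model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#   inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits  # shape (1, 527)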
| 78 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        # wrap every line with the tokenizer's sentence delimiters before encoding
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(rslt)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
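    # Design note: storing token ids as np.uint16 halves the memory/disk footprint,
    # which is safe whenever the tokenizer's vocabulary fits in 16 bits (e.g. BERT's
    # ~30k entries); otherwise the wider np.int32 is required.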
    random.shuffle(rslt_)

    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
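# Example invocation (script and data paths are illustrative):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text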
| 115 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
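    # Note: scale_to_features is the exact inverse of the affine map in scale_features
    # (for matching ranges), so mel features round-trip through the model's [-1, 1] space.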
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
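# Minimal usage sketch (the checkpoint id is illustrative, and `input_tokens`
# must already be note sequences tokenized into chunks of encoder input tokens):
#
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   output = pipe(input_tokens, num_inference_steps=100, output_type="numpy")
#   audio = output.audios[0]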
| 115 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
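# Typical use: this module backs the `accelerate env` CLI subcommand, which prints
# the environment report above for copy-pasting into bug reports.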
| 265 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) have actually been stepped, so
    the scheduler stays in sync when gradient accumulation or a skipped (overflowed) optimizer step occurs.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough to the wrapped scheduler.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
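# A minimal wiring sketch (names are illustrative; inside Accelerate this wrapper
# is normally created for you by `Accelerator.prepare`):
#
#   scheduler = AcceleratedScheduler(lr_scheduler, optimizers=optimizer,
#                                    step_with_optimizer=True, split_batches=False)
#   scheduler.step()  # silently no-ops while gradients are still accumulating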
| 265 | 1 |
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        'prompt',
        'height',
        'width',
        'guidance_scale',
        'negative_prompt',
        'prompt_embeds',
        'negative_prompt_embeds',
        'cross_attention_kwargs',
    ]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['image'])
IMAGE_VARIATION_PARAMS = frozenset(
    [
        'image',
        'height',
        'width',
        'guidance_scale',
    ]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['image'])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        'prompt',
        'image',
        'height',
        'width',
        'guidance_scale',
        'negative_prompt',
        'prompt_embeds',
        'negative_prompt_embeds',
    ]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['prompt', 'image', 'negative_prompt'])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        'prompt',
        'image',
        'mask_image',
        'height',
        'width',
        'guidance_scale',
        'negative_prompt',
        'prompt_embeds',
        'negative_prompt_embeds',
    ]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        'image',
        'mask_image',
        'height',
        'width',
        'guidance_scale',
    ]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['image', 'mask_image'])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        'example_image',
        'image',
        'mask_image',
        'height',
        'width',
        'guidance_scale',
    ]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['example_image', 'image', 'mask_image'])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['class_labels'])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['class_labels'])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        'prompt',
        'audio_length_in_s',
        'guidance_scale',
        'negative_prompt',
        'prompt_embeds',
        'negative_prompt_embeds',
        'cross_attention_kwargs',
    ]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['input_tokens'])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['input_tokens'])
| 65 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : str = self.get_decoder()
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : str = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,A )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(A ,"""include""" ):
WavaVecaProcessorWithLM(
tokenizer=A ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_decoder()
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : str = floats_list((3, 1_000) )
UpperCAmelCase__ : Optional[Any] = feature_extractor(A ,return_tensors="""np""" )
UpperCAmelCase__ : List[Any] = processor(A ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_feature_extractor()
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase__ : Optional[int] = self.get_decoder()
UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : List[Any] = """This is a test string"""
UpperCAmelCase__ : int = processor(text=A )
UpperCAmelCase__ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : Dict = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
UpperCAmelCase__ : Tuple = processor.decode(A )
UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams(A )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def __lowercase ( self : List[str] ,A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_feature_extractor()
UpperCAmelCase__ : int = self.get_tokenizer()
UpperCAmelCase__ : List[Any] = self.get_decoder()
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase__ : List[str] = processor.batch_decode(A )
else:
with get_context(A ).Pool() as pool:
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(A ,A )
UpperCAmelCase__ : Optional[Any] = list(A )
with get_context("""fork""" ).Pool() as p:
UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams_batch(A ,A )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A ,decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] ,decoded_processor.text )
self.assertListEqual(A ,decoded_processor.logit_score )
self.assertListEqual(A ,decoded_processor.lm_score )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : List[Any] = self.get_decoder()
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : Dict = self._get_dummy_logits()
UpperCAmelCase__ : Any = 15
UpperCAmelCase__ : Dict = -2_0.0
UpperCAmelCase__ : List[Any] = -4.0
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,)
UpperCAmelCase__ : List[str] = decoded_processor_out.text
UpperCAmelCase__ : List[str] = list(A )
with get_context("""fork""" ).Pool() as pool:
UpperCAmelCase__ : Tuple = decoder.decode_beams_batch(
A ,A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,)
UpperCAmelCase__ : List[Any] = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase__ : Any = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase__ : List[str] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A ,A )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] ,A )
self.assertTrue(np.array_equal(A ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,A ,atol=1e-3 ) )
self.assertTrue(np.array_equal(A ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,A ,atol=1e-3 ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : str = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : Tuple = self._get_dummy_logits()
UpperCAmelCase__ : Tuple = 2.0
UpperCAmelCase__ : str = 5.0
UpperCAmelCase__ : Union[str, Any] = -2_0.0
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : str = processor.batch_decode(
A ,alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,)
UpperCAmelCase__ : Any = decoded_processor_out.text
UpperCAmelCase__ : Union[str, Any] = list(A )
decoder.reset_params(
alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,)
with get_context("""fork""" ).Pool() as pool:
UpperCAmelCase__ : List[Any] = decoder.decode_beams_batch(
A ,A ,)
UpperCAmelCase__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A ,A )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] ,A )
UpperCAmelCase__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,A )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : str = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Any = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
UpperCAmelCase__ : Optional[int] = os.listdir(A )
UpperCAmelCase__ : List[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A ,A )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(A )
UpperCAmelCase__ : Tuple = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
UpperCAmelCase__ : Tuple = os.listdir(A )
UpperCAmelCase__ : Dict = os.listdir(A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(A ,A )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : Dict = floats_list((3, 1_000) )
UpperCAmelCase__ : List[str] = processor_wavaveca(A ,return_tensors="""np""" )
UpperCAmelCase__ : Dict = processor_auto(A ,return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 )
UpperCAmelCase__ : List[str] = self._get_dummy_logits()
UpperCAmelCase__ : Tuple = processor_wavaveca.batch_decode(A )
UpperCAmelCase__ : List[str] = processor_auto.batch_decode(A )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : List[Any] = self.get_decoder()
UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,)
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : Dict = self._get_dummy_logits()[0]
UpperCAmelCase__ : List[str] = processor.decode(A ,output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(A ,A ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""start_offset""" ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""end_offset""" ) ,[1, 3, 5] )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : int = self._get_dummy_logits()
UpperCAmelCase__ : Any = processor.batch_decode(A ,output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(A ,A ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(A ,"""word""" ) ) for o in outputs["""word_offsets"""]] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""start_offset""" ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""end_offset""" ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __lowercase ( self : Tuple ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Any = load_dataset("""common_voice""" ,"""en""" ,split="""train""" ,streaming=A )
UpperCAmelCase__ : Tuple = ds.cast_column("""audio""" ,datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase__ : Tuple = iter(A )
UpperCAmelCase__ : Optional[int] = next(A )
UpperCAmelCase__ : List[Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
UpperCAmelCase__ : Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase__ : Tuple = processor(sample["""audio"""]["""array"""] ,return_tensors="""pt""" ).input_values
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(A ).logits.cpu().numpy()
UpperCAmelCase__ : Any = processor.decode(logits[0] ,output_word_offsets=A )
UpperCAmelCase__ : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase__ : Union[str, Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
UpperCAmelCase__ : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,A )
self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,output.text )
# output times
UpperCAmelCase__ : str = torch.tensor(self.get_from_offsets(A ,"""start_time""" ) )
UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(A ,"""end_time""" ) )
# fmt: off
UpperCAmelCase__ : Union[str, Any] = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
UpperCAmelCase__ : List[Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) )
| 65 | 1 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50_257,
        n_positions: int = 1_024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(self, input_ids, prefix_embeds, attention_mask=None, labels=None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
@torch.no_grad()
def _snake_case ( self , lowercase , lowercase , lowercase ) -> str:
lowerCAmelCase = torch.split(lowercase , 1 , dim=0 )
lowerCAmelCase = []
lowerCAmelCase = []
for feature in features:
lowerCAmelCase = self.decode_prefix(feature.to(lowercase ) ) # back to the clip feature
# Only support beam search for now
lowerCAmelCase , lowerCAmelCase = self.generate_beam(
input_embeds=lowercase , device=lowercase , eos_token_id=lowercase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCAmelCase = torch.stack(lowercase )
lowerCAmelCase = torch.stack(lowercase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _snake_case ( self , lowercase=None , lowercase=None , lowercase=None , lowercase = 5 , lowercase = 67 , lowercase = 1.0 , lowercase = None , ) -> int:
lowerCAmelCase = eos_token_id
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = torch.ones(lowercase , device=lowercase , dtype=torch.int )
lowerCAmelCase = torch.zeros(lowercase , device=lowercase , dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase = input_embeds
else:
lowerCAmelCase = self.transformer.transformer.wte(lowercase )
for i in range(lowercase ):
lowerCAmelCase = self.transformer(inputs_embeds=lowercase )
lowerCAmelCase = outputs.logits
lowerCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase = logits.softmax(-1 ).log()
if scores is None:
lowerCAmelCase , lowerCAmelCase = logits.topk(lowercase , -1 )
lowerCAmelCase = generated.expand(lowercase , *generated.shape[1:] )
lowerCAmelCase , lowerCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase = next_tokens
else:
lowerCAmelCase = tokens.expand(lowercase , *tokens.shape[1:] )
lowerCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCAmelCase = -float(np.inf )
lowerCAmelCase = 0
lowerCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase = scores_sum / seq_lengths[:, None]
lowerCAmelCase , lowerCAmelCase = scores_sum_average.view(-1 ).topk(lowercase , -1 )
lowerCAmelCase = next_tokens // scores_sum.shape[1]
lowerCAmelCase = seq_lengths[next_tokens_source]
lowerCAmelCase = next_tokens % scores_sum.shape[1]
lowerCAmelCase = next_tokens.unsqueeze(1 )
lowerCAmelCase = tokens[next_tokens_source]
lowerCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
lowerCAmelCase = generated[next_tokens_source]
lowerCAmelCase = scores_sum_average * seq_lengths
lowerCAmelCase = is_stopped[next_tokens_source]
lowerCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
lowerCAmelCase = is_stopped + next_tokens.eq(lowercase ).squeeze()
if is_stopped.all():
break
lowerCAmelCase = scores / seq_lengths
lowerCAmelCase = scores.argsort(descending=lowercase )
# tokens tensors are already padded to max_seq_length
lowerCAmelCase = [tokens[i] for i in order]
lowerCAmelCase = torch.stack(lowercase , dim=0 )
lowerCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
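    # Note on the beam search above: `scores / seq_lengths` length-normalizes the
    # cumulative log-probabilities before ranking, so longer beams are not penalized
    # simply for having accumulated more (negative) log-probability terms.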
| 393 |
"""simple docstring"""
import os
def solution(filename: str = "input.txt") -> int:
    """
    Find the minimal path sum through a matrix from its left column to its right
    column, moving only up, down, and right (Project Euler problem 82).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            # first assume we entered the column from the left
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            # allow entering each cell from above
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            # allow entering each cell from below
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
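# e.g. for the 2x2 grid [[1, 2], [3, 4]] the best path is 1 -> 2, giving 3.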
if __name__ == "__main__":
print(f'{solution() = }')
| 393 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
def _A ( self : Any , A : str , A : Optional[Any] , A : Optional[Any] ):
_UpperCAmelCase : Optional[int] = 20
_UpperCAmelCase : Optional[Any] = model_class_name(A )
_UpperCAmelCase : str = model.encode(inputs_dict["input_ids"] )
_UpperCAmelCase , _UpperCAmelCase : Any = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_UpperCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , A , A )
_UpperCAmelCase : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
_UpperCAmelCase : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
_UpperCAmelCase : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_UpperCAmelCase : List[Any] = model.decode(
decoder_input_ids[:, -1:] , A , decoder_attention_mask=A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=A , )
_UpperCAmelCase : Optional[Any] = model.decode(A , A )
_UpperCAmelCase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def _A ( self : Tuple , A : Union[str, Any] , A : Optional[Any] , A : int ):
_UpperCAmelCase : Optional[int] = 20
_UpperCAmelCase : Tuple = model_class_name(A )
_UpperCAmelCase : Dict = model.encode(inputs_dict["input_ids"] )
_UpperCAmelCase , _UpperCAmelCase : List[str] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_UpperCAmelCase : Optional[int] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase : str = model.init_cache(decoder_input_ids.shape[0] , A , A )
_UpperCAmelCase : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase : Any = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
_UpperCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_UpperCAmelCase : List[str] = model.decode(
decoder_input_ids[:, -1:] , A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=A , decoder_position_ids=A , )
_UpperCAmelCase : Optional[int] = model.decode(A , A , decoder_attention_mask=A )
_UpperCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
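# Note: the first decoder position is forced to 1 in the mask because Pegasus uses
# pad_token_id as the decoder start token, which np.not_equal would otherwise mask out.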
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
def _A ( self : int ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(A , A , A )
def _A ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(A , A , A )
def _A ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : Optional[Any] = self._prepare_for_class(A , A )
_UpperCAmelCase : List[Any] = model_class(A )
@jax.jit
def encode_jitted(A : List[Any] , A : List[Any]=None , **A : Optional[int] ):
return model.encode(input_ids=A , attention_mask=A )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase : int = encode_jitted(**A ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase : List[Any] = encode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
def _A ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : Union[str, Any] = model_class(A )
_UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
_UpperCAmelCase : Optional[Any] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(A : List[Any] , A : Optional[int] , A : Optional[int] ):
return model.decode(
decoder_input_ids=A , decoder_attention_mask=A , encoder_outputs=A , )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase : Any = decode_jitted(**A ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase : List[str] = decode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _A ( self : List[str] ):
for model_class_name in self.all_model_classes:
_UpperCAmelCase : List[Any] = model_class_name.from_pretrained("google/pegasus-large" , from_pt=A )
_UpperCAmelCase : List[Any] = np.ones((1, 1) )
_UpperCAmelCase : Dict = model(A )
self.assertIsNotNone(A )
@slow
def _A ( self : Optional[int] ):
_UpperCAmelCase : List[Any] = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
_UpperCAmelCase : Any = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
_UpperCAmelCase : Dict = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 244 | '''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    """Constructs a Bark processor wrapping a text tokenizer and optional speaker embeddings."""

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist, "
                    "no preloaded speaker embeddings will be used - make sure to provide a correct path to the JSON "
                    "dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` "
                    f"does not exist, no preloaded voice preset will be used - make sure to provide correct paths "
                    f"to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
| 244 | 1 |
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items by descending value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
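
    # A minimal usage sketch (hypothetical item values/weights, not taken from
    # this module's doctests): three items, knapsack capacity 5. The greedy
    # pass takes all of item 1 (ratio 10), then 1/6 of item 2 (ratio 5).
    best_value, taken = fractional_knapsack(value=[10, 40, 30], weight=[5, 4, 6], capacity=5)
    print(best_value, taken)  # 45.0 [0, 1, 0.1666...]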
| 716 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
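# Usage sketch: with the lazy import structure above, importing this package is
# cheap - e.g. `FNetConfig` can be loaded without pulling in torch, while the
# torch-backed `FNetModel` is only imported on first attribute access.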
| 398 | 0 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """May raise json.decoder.JSONDecodeError if the page markup changes."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
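# Sketch of the JSON shape the parser above assumes (abridged; these field
# names are Instagram's and may drift as the page markup changes):
#   {"entry_data": {"ProfilePage": [{"graphql": {"user": {...}}}]}}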
class InstagramUser:
    """Lightweight scraper for the public data of an Instagram profile page."""

    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()
    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """Smoke test against the live profile page (skipped on CI runners)."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }')
| 203 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name of a given Gregorian date (Doomsday rule)."""
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # non-leap years: not divisible by 4, or a century year not divisible by 400
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
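
    # Quick sanity check (a date easy to verify by hand):
    # 1 January 2023 fell on a Sunday.
    print(get_week_day(2023, 1, 1))  # Sunday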
| 203 | 1 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i]) | 706 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
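
    # Example invocation (sketch; the run id and token below are placeholders):
    #   python extract_warnings.py --workflow_run_id 1234567890 \
    #       --output_dir ./artifacts --token <github_token> \
    #       --targets DeprecationWarning,UserWarning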
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4) | 615 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """Builds small Funnel configs/inputs and checks model outputs in the tests below."""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) | 561 | 0 |
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND: returns 1 only when both inputs are 1.

    Truth table: (0,0)->0, (0,1)->0, (1,0)->0, (1,1)->1
    """
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 561 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 717 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
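

# Usage sketch (hypothetical test module): the autouse fixtures above apply on
# their own, and individual tests can opt into the marker declared in
# `pytest_configure`:
#
#   @pytest.mark.torchaudio_latest
#   def test_with_recent_torchaudio():
#       ...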
| 190 | 0 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
    fail_training_script_args = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
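
    # The list above is expected to make `_convert_nargs_to_dict` raise: bare
    # flags such as `--do_train` and `--do_predict` are mixed with key/value
    # pairs, so arguments can no longer be paired with values unambiguously.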
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 311 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 311 | 1 |
import socket


def main() -> None:
    """Receive a file over a TCP socket and write it to `Received_file`."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
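
# A matching sender is sketched below for reference (hypothetical, not part of
# this module): it accepts one connection and streams a file in 1 KiB chunks,
# mirroring the 1024-byte `recv` loop above.
#
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _ = server.accept()
#   with open("file_to_send", "rb") as in_file:
#       while chunk := in_file.read(1024):
#           conn.send(chunk)
#   conn.close()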
| 398 |
class Graph:
    """Weighted undirected graph stored as an adjacency dictionary."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct so the minimum spanning tree is unique."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree with Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]

            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1

        mst = Graph.build(edges=mst_edges)
        return mst
| 398 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
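

# Usage sketch (hypothetical prompt): ask the user to pick a mixed-precision
# mode and convert the numeric menu choice into a `PrecisionType` value:
#
#   mixed_precision = _ask_options(
#       "Do you wish to use FP16 or BF16 (mixed precision)?",
#       ["no", "fp16", "bf16", "fp8"],
#       _convert_mixed_precision,
#   )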
| 243 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
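# In practice this formatter is not instantiated directly: `datasets` selects it
# when a torch format is requested. Sketch (requires `datasets` and `torch`):
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
#   ds = ds.with_format("torch")
#   ds[0]["x"]  # -> tensor([1., 2.]); floats are mapped to torch.float32 above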
| 243 | 1 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
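if __name__ == "__main__":
    # Quick self-test of the parsers above (hypothetical variable names).
    os.environ["MY_DEBUG"] = "true"
    assert parse_flag_from_env("MY_DEBUG") is True
    assert get_int_from_env(["MY_UNSET_KEY"], 7) == 7  # no matching env var -> default
    assert parse_choice_from_env("MY_UNSET_CHOICE") == "no"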
| 550 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    # index arithmetic for a binary heap stored in a flat list
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue backed by a binary heap, with a position map for O(log n) `update_key`."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # move a node up until the heap property holds again
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # move a node down until the heap property holds again
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph: GraphUndirectedWeighted[T]) -> tuple[dict[T, int], dict[T, T | None]]:
    # dist[v]: lightest known edge connecting v to the growing tree; parent[v]: the tree edge used
    dist = {node: maxsize for node in graph.connections}
    parent = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
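if __name__ == "__main__":
    # Small demo of the machinery above: in a weighted triangle, Prim's algorithm
    # keeps the two lightest edges as the minimum spanning tree.
    g: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "a", 15)
    dist, parent = prims_algo(g)
    assert parent == {"a": None, "b": "a", "c": "b"}  # MST edges: a-b and b-c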
| 550 | 1 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    """Separate-chaining variant of `HashTable`: each slot holds a deque of values."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
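# Hypothetical usage sketch; the constructor and `insert_data` are assumed to be
# provided by the companion `HashTable` base class (e.g.
# `HashTable(size_table, charge_factor=None, lim_charge=None)`), so this is a
# commented sketch rather than executable code.
#
#   ht = HashTableWithLinkedList(3, charge_factor=2)
#   for value in (10, 20, 30):
#       ht.insert_data(value)  # each slot chains its values in a deque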
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    """Take a positive integer and return its binary equivalent without prefix, e.g. 1000 -> '1111101000'."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Take an integer value (possibly signed, possibly padded with whitespace) and return its binary equivalent."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
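    # Worked conversions appended to the self-test (hand-checkable: 35 = 32 + 2 + 1):
    assert main("35") == "0b100011"
    assert main("-2") == "-0b10"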
| 602 | 0 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            # every step must pass the same number of gradients
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
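# The pattern under test, reduced to a minimal (hypothetical) training-loop sketch:
# accumulate gradients over N micro-batches, then apply them once and reset.
#
#   accumulator = GradientAccumulator()
#   for step, batch in enumerate(batches):          # `batches`/`compute_gradients` are hypothetical
#       accumulator(compute_gradients(batch))       # running sum of per-batch gradients
#       if (step + 1) % N == 0:
#           optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#           accumulator.reset()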
| 168 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False  # attribute name assumed; the flag was `False` in the source

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
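# Minimal inference sketch mirroring the integration tests above (downloads the
# `harmonai/maestro-150k` checkpoint, so shown as a commented sketch):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
#   audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios[0]
#   # `audio` is a (channels, samples) array at the model's sample rate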
| 168 | 1 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
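# For reference: a minimal sketch of the label-smoothed loss that `utils` is
# expected to provide above (fairseq-style formulation). `lprobs` are
# log-probabilities of shape (..., vocab), `target` the gold token ids;
# `ignore_index` must be a valid vocabulary id (e.g. the pad token) in this sketch.
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)  # true-class term
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # uniform-prior term
    pad_mask = target.eq(ignore_index)
    nll_loss.masked_fill_(pad_mask, 0.0)
    smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss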
| 68 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a Gabor kernel: a Gaussian envelope modulated by a sinusoid at angle `theta`."""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__A = imread("../image_data/lena.jpg")
# turn image in gray scale value
__A = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__A = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__A = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__A = out / out.max() * 2_55
__A = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 68 | 1 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False  # second flag name assumed; the value was `False` in the source

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 258 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 258 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RoFormer tokenizer, with jieba-based pre-tokenization for Chinese text."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom jieba pre-tokenizer cannot be pickled, swap in the default one
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
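# Usage sketch (downloads a checkpoint listed in the pretrained map above, so
# shown as a commented sketch):
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")  # jieba-based pre-tokenization for Chinese text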
| 88 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False  # attribute name assumed; the flag was `False` in the source

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 647 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_lowerCAmelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
_lowerCAmelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
_lowerCAmelCase = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)

        return {
            "accuracy": accuracy,
        }
| 481 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
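The block above is purely a backward-compatibility shim: these symbols now live in `transformers.utils` and are re-exported under the old `transformers.file_utils` path. A quick hedged sanity check (assuming a transformers version that still ships `file_utils`):

# Hedged sketch: both import paths should resolve to the same re-exported objects.
from transformers.file_utils import WEIGHTS_NAME as OLD_WEIGHTS_NAME
from transformers.utils import WEIGHTS_NAME as NEW_WEIGHTS_NAME

assert OLD_WEIGHTS_NAME is NEW_WEIGHTS_NAME  # same object, just two import paths
print(NEW_WEIGHTS_NAME)  # "pytorch_model.bin"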
| 481 | 1 |
"""simple docstring"""
import torch
from torch import nn
class UpperCamelCase (nn.Module ):
def __init__( self :Tuple , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Tuple , __magic_name__ :Optional[Any] , __magic_name__ :Union[str, Any]=1 , __magic_name__ :List[str]=False ) ->List[str]:
super().__init__()
lowercase : Optional[Any] = n_token
lowercase : Any = d_embed
lowercase : Optional[int] = d_proj
lowercase : List[str] = cutoffs + [n_token]
lowercase : Union[str, Any] = [0] + self.cutoffs
lowercase : Dict = div_val
lowercase : Optional[int] = self.cutoffs[0]
lowercase : Any = len(self.cutoffs ) - 1
lowercase : List[str] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowercase : Union[str, Any] = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowercase : int = nn.Parameter(torch.zeros(self.n_clusters ) )
lowercase : Optional[Any] = nn.ModuleList()
lowercase : Dict = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__magic_name__ , __magic_name__ ) ) )
else:
self.out_projs.append(__magic_name__ )
self.out_layers.append(nn.Linear(__magic_name__ , __magic_name__ ) )
else:
for i in range(len(self.cutoffs ) ):
lowercase , lowercase : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase : Union[str, Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__magic_name__ , __magic_name__ ) ) )
self.out_layers.append(nn.Linear(__magic_name__ , r_idx - l_idx ) )
lowercase : List[Any] = keep_order
def __snake_case ( self :str , __magic_name__ :List[Any] , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :str ) ->Optional[int]:
if proj is None:
lowercase : List[str] = nn.functional.linear(__magic_name__ , __magic_name__ , bias=__magic_name__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowercase : Any = nn.functional.linear(__magic_name__ , proj.t().contiguous() )
lowercase : Union[str, Any] = nn.functional.linear(__magic_name__ , __magic_name__ , bias=__magic_name__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def __snake_case ( self :List[Any] , __magic_name__ :List[str] , __magic_name__ :List[Any]=None , __magic_name__ :Optional[Any]=False ) ->Dict:
if labels is not None:
# Shift so that tokens < n predict n
lowercase : int = hidden[..., :-1, :].contiguous()
lowercase : int = labels[..., 1:].contiguous()
lowercase : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowercase : List[str] = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
lowercase : str = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowercase : Tuple = self._compute_logit(__magic_name__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowercase : Any = labels != -100
lowercase : List[str] = torch.zeros_like(__magic_name__ , dtype=hidden.dtype , device=hidden.device )
lowercase : Tuple = (
-nn.functional.log_softmax(__magic_name__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowercase : str = nn.functional.log_softmax(__magic_name__ , dim=-1 )
else:
# construct weights and biases
lowercase , lowercase : Optional[int] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase , lowercase : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase : Tuple = self.out_layers[0].weight[l_idx:r_idx]
lowercase : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase : List[Any] = self.out_layers[i].weight
lowercase : List[Any] = self.out_layers[i].bias
if i == 0:
lowercase : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowercase : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__magic_name__ )
biases.append(__magic_name__ )
lowercase , lowercase , lowercase : Union[str, Any] = weights[0], biases[0], self.out_projs[0]
lowercase : Any = self._compute_logit(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
lowercase : List[str] = nn.functional.log_softmax(__magic_name__ , dim=1 )
if labels is None:
lowercase : Optional[int] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowercase : List[Any] = torch.zeros_like(__magic_name__ , dtype=hidden.dtype , device=hidden.device )
lowercase : List[str] = 0
lowercase : List[Any] = [0] + self.cutoffs
for i in range(len(__magic_name__ ) - 1 ):
lowercase , lowercase : str = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowercase : List[Any] = (labels >= l_idx) & (labels < r_idx)
lowercase : str = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowercase : str = labels.index_select(0 , __magic_name__ ) - l_idx
lowercase : List[str] = head_logprob.index_select(0 , __magic_name__ )
lowercase : Any = hidden.index_select(0 , __magic_name__ )
else:
lowercase : List[str] = hidden
if i == 0:
if labels is not None:
lowercase : Optional[int] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowercase : Optional[int] = head_logprob[:, : self.cutoffs[0]]
else:
lowercase , lowercase , lowercase : Tuple = weights[i], biases[i], self.out_projs[i]
lowercase : Dict = self._compute_logit(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
lowercase : Optional[int] = nn.functional.log_softmax(__magic_name__ , dim=1 )
lowercase : Union[str, Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowercase : List[Any] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowercase : str = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowercase : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __magic_name__ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def __snake_case ( self :Dict , __magic_name__ :Any ) ->Tuple:
if self.n_clusters == 0:
lowercase : Tuple = self._compute_logit(__magic_name__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__magic_name__ , dim=-1 )
else:
# construct weights and biases
lowercase , lowercase : Union[str, Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase , lowercase : int = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase : Optional[int] = self.out_layers[0].weight[l_idx:r_idx]
lowercase : List[str] = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase : List[Any] = self.out_layers[i].weight
lowercase : List[str] = self.out_layers[i].bias
if i == 0:
lowercase : Tuple = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowercase : Tuple = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__magic_name__ )
biases.append(__magic_name__ )
lowercase , lowercase , lowercase : List[Any] = weights[0], biases[0], self.out_projs[0]
lowercase : int = self._compute_logit(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
lowercase : Optional[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowercase : Union[str, Any] = nn.functional.log_softmax(__magic_name__ , dim=1 )
lowercase : List[str] = [0] + self.cutoffs
for i in range(len(__magic_name__ ) - 1 ):
lowercase , lowercase : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowercase : List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowercase , lowercase , lowercase : str = weights[i], biases[i], self.out_projs[i]
lowercase : Optional[Any] = self._compute_logit(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
lowercase : Dict = nn.functional.log_softmax(__magic_name__ , dim=1 )
lowercase : int = head_logprob[:, -i] + tail_logprob_i
lowercase : int = logprob_i
return out
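A hedged usage sketch for the class above; the vocabulary size, cutoffs, and shapes are illustrative and not taken from any particular model config. With labels, `forward` returns per-token negative log-likelihoods (after shifting inputs by one position).

# Hedged usage sketch (illustrative shapes only).
import torch

vocab_size, d_embed, d_proj = 10000, 64, 64
crit = ProjectedAdaptiveLogSoftmax(vocab_size, d_embed, d_proj, cutoffs=[1000, 5000], div_val=1)

hidden = torch.randn(2, 8, d_proj)               # (batch, seq_len, d_proj)
labels = torch.randint(0, vocab_size, (2, 8))
nll = crit(hidden, labels)                        # per-token negative log-likelihoods
loss = nll.mean()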
| 264 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase ( _A = "laptop" ) -> DataFrame:
lowercase : List[str] = F"""https://www.amazon.in/laptop/s?k={product}"""
lowercase : str = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
lowercase : str = BeautifulSoup(requests.get(_A , headers=_A ).text )
# Initialize a Pandas dataframe with the column titles
lowercase : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
lowercase : str = item.ha.text
lowercase : Tuple = """https://www.amazon.in/""" + item.ha.a["""href"""]
lowercase : Tuple = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
lowercase : Any = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
lowercase : Tuple = """Not available"""
try:
lowercase : Any = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
lowercase : Any = """"""
try:
lowercase : List[str] = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 100 )
except ValueError:
lowercase : Tuple = float("""nan""" )
except AttributeError:
pass
lowercase : Union[str, Any] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowercase : Tuple = """ """
lowercase : List[str] = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_lowerCAmelCase = 'headphones'
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 264 | 1 |
import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layer norm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
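For reference, a hedged sketch of calling the conversion directly (all paths below are placeholders). The `.transpose(0, 1)` calls in the script reflect a layout difference: trax/JAX store dense kernels as (d_in, d_out) while `torch.nn.Linear.weight` is (d_out, d_in).

# Hedged sketch only; the paths are placeholders, not real checkpoints.
convert_trax_checkpoint_to_pytorch(
    trax_model_pkl_path="/path/to/trax_model.pkl",
    config_file="/path/to/reformer_config.json",
    pytorch_dump_path="/path/to/pytorch_model.bin",
)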
| 586 |
from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo


_CITATION = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
        and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
        Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
        Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
        Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
        and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=predictions, hypotheses=references, min_len=min_len, max_len=max_len
            )
        }
| 586 | 1 |
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
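A quick worked example: for dy/dx = y with y(0) = 1, the exact value at x = 1 is e ≈ 2.71828, and forward Euler with step 0.01 slightly undershoots it.

# 100 steps of forward Euler give (1 + 0.01) ** 100 ≈ 2.7048, the classic underestimate of e.
y = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(y[-1])  # ≈ 2.7048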
| 75 |
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate/warp the image via the affine transform mapping the points pt1 onto pt2."""
    rotation_mat = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_mat, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
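`cv2.getAffineTransform` returns the unique 2x3 matrix that maps three source points onto three destination points; a small hedged check of that property:

# Hedged check: the affine matrix M satisfies dst_i == M @ [x_i, y_i, 1] for all three pairs.
import cv2
import numpy as np

src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
M = cv2.getAffineTransform(src, dst)  # shape (2, 3)

ones = np.ones((3, 1), np.float32)
mapped = (M @ np.hstack([src, ones]).T).T
assert np.allclose(mapped, dst, atol=1e-4)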
| 75 | 1 |
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the Flax convention and reshape tensors where needed."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
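For intuition, `rename_key` only rewrites `name.<digits>` segments, which is how PyTorch module-list indices map onto Flax's underscore naming:

# PyTorch "layers.0"-style indices become Flax "layers_0"; other segments are untouched.
print(rename_key("down_blocks.0.resnets.1.conv1.weight"))
# -> down_blocks_0.resnets_1.conv1.weight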
| 705 |
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 344 | 0 |
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"
@property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
    def input_shape(self):
        return (4, 8)
@property
    def output_shape(self):
        return (4, 8)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
SCREAMING_SNAKE_CASE : Tuple = model.to(lowerCamelCase_ )
if hasattr(lowerCamelCase_ , """set_default_attn_processor""" ):
model.set_default_attn_processor()
SCREAMING_SNAKE_CASE : int = self.get_dummy_seed_input()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : int = output[0, :5].flatten().cpu()
print(lowerCamelCase_ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
SCREAMING_SNAKE_CASE : Dict = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(lowerCamelCase_ , lowerCamelCase_ , rtol=1e-2 ) )
@slow
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any]=1 , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Tuple=77 , lowerCamelCase_ : List[str]=0 ):
'''simple docstring'''
torch.manual_seed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : Any = embedding_dim
SCREAMING_SNAKE_CASE : int = num_embeddings
SCREAMING_SNAKE_CASE : int = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
model.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.get_dummy_seed_input(seed=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**lowerCamelCase_ )[0]
assert list(sample.shape ) == [1, 7_68]
SCREAMING_SNAKE_CASE : Optional[Any] = sample[0, :8].flatten().cpu()
print(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
| 379 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 379 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
def lowercase__ (self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
SCREAMING_SNAKE_CASE : int = dict(zip(__UpperCAmelCase, range(len(__UpperCAmelCase ) ) ) )
SCREAMING_SNAKE_CASE : int = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
SCREAMING_SNAKE_CASE : Dict = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname, __UpperCAmelCase )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(__UpperCAmelCase, __UpperCAmelCase )
def lowercase__ (self : List[Any], **__UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token='''!''', **__UpperCAmelCase )
def lowercase__ (self : Dict, **__UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token='''!''', **__UpperCAmelCase )
def lowercase__ (self : Optional[int], **__UpperCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **__UpperCAmelCase )
def lowercase__ (self : Optional[Any] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase__ (self : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
SCREAMING_SNAKE_CASE : Union[str, Any] = [Image.fromarray(np.moveaxis(__UpperCAmelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ (self : Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Tuple = OwlViTProcessor(tokenizer=__UpperCAmelCase, image_processor=__UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[str] = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = OwlViTProcessor(tokenizer=__UpperCAmelCase, image_processor=__UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, __UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer, __UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, __UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor, __UpperCAmelCase )
def lowercase__ (self : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : int = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor(do_normalize=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=__UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, __UpperCAmelCase )
def lowercase__ (self : Optional[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.get_image_processor()
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = OwlViTProcessor(tokenizer=__UpperCAmelCase, image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : int = image_processor(__UpperCAmelCase, return_tensors='''np''' )
SCREAMING_SNAKE_CASE : Dict = processor(images=__UpperCAmelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase__ (self : str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Dict = OwlViTProcessor(tokenizer=__UpperCAmelCase, image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Optional[int] = '''lower newer'''
SCREAMING_SNAKE_CASE : Optional[int] = processor(text=__UpperCAmelCase, return_tensors='''np''' )
SCREAMING_SNAKE_CASE : str = tokenizer(__UpperCAmelCase, return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist() )
def lowercase__ (self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = OwlViTProcessor(tokenizer=__UpperCAmelCase, image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Optional[int] = '''lower newer'''
SCREAMING_SNAKE_CASE : int = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Dict = processor(text=__UpperCAmelCase, images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
def lowercase__ (self : Optional[Any] ) -> int:
"""simple docstring"""
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)
        seq_length = 16
self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape, (2, seq_length) )
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
def lowercase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)
        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max(len(texts) for texts in input_texts)
self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape, (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def lowercase__ (self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)
        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape, (2, seq_length) )
self.assertListEqual(list(input_ids[0] ), predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ), predicted_ids[1] )
def lowercase__ (self : Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = OwlViTProcessor(tokenizer=__UpperCAmelCase, image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Optional[int] = processor(images=__UpperCAmelCase, query_images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ), ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
def lowercase__ (self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = OwlViTProcessor(tokenizer=__UpperCAmelCase, image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE : List[str] = processor.batch_decode(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase, __UpperCAmelCase )
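A hedged usage sketch of the processor exercised by the tests above; the checkpoint name matches the one used in the tests, and the image URL is a common COCO test asset (swap in any local image).

# Hedged sketch: zero-shot detection pre-processing with OwlViTProcessor.
import requests
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, pixel_values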
| 355 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class a__ ( _lowercase, _lowercase ):
@register_to_config
def __init__(self : List[str], __UpperCAmelCase : int = 128, __UpperCAmelCase : int = 256, __UpperCAmelCase : float = 2000.0, __UpperCAmelCase : int = 768, __UpperCAmelCase : int = 12, __UpperCAmelCase : int = 12, __UpperCAmelCase : int = 64, __UpperCAmelCase : int = 2048, __UpperCAmelCase : float = 0.1, ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : int = nn.Sequential(
nn.Linear(__UpperCAmelCase, d_model * 4, bias=__UpperCAmelCase ), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=__UpperCAmelCase ), nn.SiLU(), )
SCREAMING_SNAKE_CASE : List[Any] = nn.Embedding(__UpperCAmelCase, __UpperCAmelCase )
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : str = nn.Linear(__UpperCAmelCase, __UpperCAmelCase, bias=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Dropout(p=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : str = nn.ModuleList()
for lyr_num in range(__UpperCAmelCase ):
# FiLM conditional T5 decoder
SCREAMING_SNAKE_CASE : List[Any] = DecoderLayer(d_model=__UpperCAmelCase, d_kv=__UpperCAmelCase, num_heads=__UpperCAmelCase, d_ff=__UpperCAmelCase, dropout_rate=__UpperCAmelCase )
self.decoders.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : str = TaLayerNorm(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = nn.Dropout(p=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Linear(__UpperCAmelCase, __UpperCAmelCase, bias=__UpperCAmelCase )
def lowercase__ (self : Optional[Any], __UpperCAmelCase : List[Any], __UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = torch.mul(query_input.unsqueeze(-1 ), key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
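
# Shape sketch (illustrative helper, not part of the original model): the broadcast in
# `encoder_decoder_mask` above turns two per-token validity masks into a
# (batch, 1, query_len, key_len) attention grid; the names below are hypothetical.
def _demo_encoder_decoder_mask():
    query_input = torch.ones(2, 5)  # (batch, decoder_len); 1.0 marks attendable positions
    key_input = torch.ones(2, 7)  # (batch, encoder_len)
    mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
    assert mask.unsqueeze(-3).shape == (2, 1, 5, 7)  # unsqueeze(-3) adds the head axis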
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
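
# Shape sketch (illustrative, not part of the original file): the gated feed-forward
# projects d_model -> d_ff twice, multiplies a GELU-gated branch with a linear branch,
# then projects back to d_model, so the sequence shape is preserved.
def _demo_gated_act_dense():
    ff = TaDenseGatedActDense(d_model=8, d_ff=16, dropout_rate=0.0)
    assert ff(torch.randn(2, 4, 8)).shape == (2, 4, 8)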
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5-style RMS norm: only scales, never shifts; variance is computed in fp32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # tanh approximation of GELU
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
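
# Usage sketch (illustrative, not part of the original file): FiLM predicts one scale and
# one shift per output channel from the conditioning embedding and applies them to x.
def _demo_film_layer():
    film = TaFiLMLayer(in_features=32, out_features=8)
    x = torch.randn(2, 10, 8)  # (batch, seq, out_features)
    conditioning_emb = torch.randn(2, 1, 32)  # (batch, 1, in_features)
    assert film(x, conditioning_emb).shape == x.shape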
| 355 | 1 |
'''simple docstring'''
from __future__ import annotations
def binary_search(a_list: list, item) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 435 | '''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
_UpperCamelCase : str = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
_UpperCamelCase : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : Union[str, Any] = {"unk_token": "[UNK]"}
_UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase__ ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def snake_case__ ( self : Dict ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.get_tokenizer()
_UpperCamelCase : Optional[int] = tokenizer("Hello" , "World" )
_UpperCamelCase : Optional[int] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , lowercase__ )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case__ ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase : str = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
_UpperCamelCase : int = tokenizer_class.from_pretrained("microsoft/deberta-base" )
_UpperCamelCase : List[str] = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
_UpperCamelCase : Optional[int] = tokenizer(lowercase__ , padding=lowercase__ )
_UpperCamelCase : Union[str, Any] = [tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ ) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
"input_ids": [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 435 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
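
# Illustrative sketch (hypothetical names, not part of this file) of the lazy-import idea
# used above: attribute access triggers the real import, so the package stays cheap to
# import until a class is actually used.
import importlib
import types


class _LazyDemoModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map every exported name to the submodule that defines it.
        self._name_to_module = {n: mod for mod, names in import_structure.items() for n in names}

    def __getattr__(self, name: str):
        submodule = importlib.import_module("." + self._name_to_module[name], self.__name__)
        return getattr(submodule, name)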
| 595 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 595 | 1 |
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
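
# Worked example (illustrative): hydrogen (2.016 g/mol) effuses about four times faster
# than oxygen (31.998 g/mol), since sqrt(31.998 / 2.016) is roughly 3.98.
def _demo_grahams_law():
    ratio = effusion_ratio(2.016, 31.998)
    assert isinstance(ratio, float) and round(ratio, 2) == 3.98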
| 23 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCAmelCase__( cls ) -> Dict:
lowercase__ : str = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def UpperCAmelCase__( cls ) -> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
lowercase__ : Union[str, Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ , repo_id="""test-config""" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
lowercase__ : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def UpperCAmelCase__( self ) -> Optional[Any]:
lowercase__ : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowercase__ : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase__ , repo_id="""valid_org/test-config-org""" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
lowercase__ : Optional[Any] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def UpperCAmelCase__( self ) -> Dict:
CustomConfig.register_for_auto_class()
lowercase__ : int = CustomConfig(attribute=42 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowercase__ : Any = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 42 )
class ConfigTestUtils(unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase__( self ) -> Tuple:
lowercase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowercase__ : List[str] = c.n_embd + 1 # int
lowercase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowercase__ : int = not c.scale_attn_weights # bool
lowercase__ : Optional[int] = c.summary_type + """foo""" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase__ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(lowerCamelCase__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(lowerCamelCase__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(lowerCamelCase__ , c.summary_type , """mismatch for key: summary_type""" )
def UpperCAmelCase__( self ) -> Optional[int]:
lowercase__ : List[str] = PretrainedConfig()
lowercase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowercase__ : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase__ , lowerCamelCase__ )]
if len(lowerCamelCase__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F''' {', '.join(lowerCamelCase__ )}.''' )
def UpperCAmelCase__( self ) -> Optional[Any]:
with self.assertRaises(lowerCamelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowercase__ : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
lowercase__ : Union[str, Any] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
lowercase__ : Dict = mock.Mock()
lowercase__ : str = 500
lowercase__ : int = {}
lowercase__ : Optional[int] = HTTPError
lowercase__ : Dict = {}
# Download this model to make sure it's in the cache.
lowercase__ : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=lowerCamelCase__ ) as mock_head:
lowercase__ : int = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This check ensures we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__( self ) -> int:
# This test is for deprecated behavior and can be removed in v5
lowercase__ : Optional[Any] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCAmelCase__( self ) -> List[Any]:
lowercase__ : Optional[int] = AutoConfig.from_pretrained("""bert-base-cased""" )
lowercase__ : Union[str, Any] = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase__ )
lowercase__ : Optional[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase__ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowercase__ : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowercase__ : Optional[int] = ["""config.42.0.0.json"""]
lowercase__ : Optional[int] = 768
configuration.save_pretrained(lowerCamelCase__ )
shutil.move(os.path.join(lowerCamelCase__ , """config.4.0.0.json""" ) , os.path.join(lowerCamelCase__ , """config.42.0.0.json""" ) )
lowercase__ : Dict = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCAmelCase__( self ) -> List[Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowercase__ : Any = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
lowercase__ : Union[str, Any] = """v4.0.0"""
lowercase__ , lowercase__ : int = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowercase__ : str = """v3.0.0"""
lowercase__ : int = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 ) | 200 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
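
# Illustrative single-process sketch (not part of the original test) of what
# `pad_across_processes` does: each rank's tensor is zero-padded along dim 0 up to the
# largest size across ranks so the results can be gathered or stacked safely.
def _demo_pad_to_common_length(t, max_len):
    padded = t.new_zeros((max_len,) + t.shape[1:])
    padded[: t.shape[0]] = t
    return padded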
| 708 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 358 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
def __init__( self , *lowercase , **lowercase ) -> str:
super().__init__(*__snake_case , **__snake_case )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def __lowerCamelCase ( self , **lowercase ) -> Optional[int]:
__UpperCamelCase = {}
if "threshold" in kwargs:
__UpperCamelCase = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
def __lowerCamelCase ( self , lowercase ) -> Dict:
__UpperCamelCase = load_image(__snake_case )
__UpperCamelCase = torch.IntTensor([[image.height, image.width]] )
__UpperCamelCase = self.image_processor(images=[image] , return_tensors="""pt""" )
if self.tokenizer is not None:
__UpperCamelCase = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
__UpperCamelCase = target_size
return inputs
def __lowerCamelCase ( self , lowercase ) -> List[Any]:
__UpperCamelCase = model_inputs.pop("""target_size""" )
__UpperCamelCase = self.model(**__snake_case )
__UpperCamelCase = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
__UpperCamelCase = model_inputs["""bbox"""]
return model_outputs
def __lowerCamelCase ( self , lowercase , lowercase=0.9 ) -> Dict:
__UpperCamelCase = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__UpperCamelCase = target_size[0].tolist()
def unnormalize(lowercase ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_0_0_0),
(height * bbox[1] / 1_0_0_0),
(width * bbox[2] / 1_0_0_0),
(height * bbox[3] / 1_0_0_0),
] ) )
__UpperCamelCase = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
__UpperCamelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__UpperCamelCase = [unnormalize(__snake_case ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
__UpperCamelCase = ["""score""", """label""", """box"""]
__UpperCamelCase = [dict(zip(__snake_case , __snake_case ) ) for vals in zip(scores.tolist() , __snake_case , __snake_case ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__UpperCamelCase = self.image_processor.post_process_object_detection(__snake_case , __snake_case , __snake_case )
__UpperCamelCase = raw_annotations[0]
__UpperCamelCase = raw_annotation["""scores"""]
__UpperCamelCase = raw_annotation["""labels"""]
__UpperCamelCase = raw_annotation["""boxes"""]
__UpperCamelCase = scores.tolist()
__UpperCamelCase = [self.model.config.idalabel[label.item()] for label in labels]
__UpperCamelCase = [self._get_bounding_box(__snake_case ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__UpperCamelCase = ["""score""", """label""", """box"""]
__UpperCamelCase = [
dict(zip(__snake_case , __snake_case ) )
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
]
return annotation
def __lowerCamelCase ( self , lowercase ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
__UpperCamelCase = box.int().tolist()
__UpperCamelCase = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
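
# Worked example (illustrative): LayoutLM-style boxes live on a 0-1000 grid, so the
# `unnormalize` closure in `postprocess` rescales them to pixels for a given image size.
def _demo_unnormalize_box():
    height, width = 100, 200  # hypothetical image size
    bbox = [100, 200, 500, 400]  # box normalized to the 0-1000 grid
    pixels = (width * bbox[0] / 1000, height * bbox[1] / 1000, width * bbox[2] / 1000, height * bbox[3] / 1000)
    assert pixels == (20.0, 20.0, 100.0, 40.0)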
| 601 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read raw microphone data through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read audio from the microphone through ffmpeg, yielding overlapping
    chunks with stride information.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len` with `stride`
    overlap between consecutive chunks.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def a ( __UpperCAmelCase : Tuple , __UpperCAmelCase : int ) -> List[Any]:
__magic_name__: str = 2**2_4 # 16Mo
try:
with subprocess.Popen(__UpperCAmelCase , stdout=subprocess.PIPE , bufsize=__UpperCAmelCase ) as ffmpeg_process:
while True:
__magic_name__: Dict = ffmpeg_process.stdout.read(__UpperCAmelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 96 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000):
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
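
# Worked check (illustrative): 585 reads the same both ways in base 10 ("585") and in
# base 2 (bin(585) == "0b1001001001"), so it contributes to the total above.
def _demo_double_base_palindrome():
    assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])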
| 700 |
"""simple docstring"""
from manim import *
class Stage5(Scene):
    def construct(self):
'''simple docstring'''
__a : List[str] = Rectangle(height=0.5 , width=0.5 )
__a : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
__a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a : Dict = [mem.copy() for i in range(6 )]
__a : str = [mem.copy() for i in range(6 )]
__a : Tuple = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : List[Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
__a : Union[str, Any] = Text("""CPU""" , font_size=24 )
__a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
__a : Optional[Any] = [mem.copy() for i in range(4 )]
__a : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : List[str] = Text("""GPU""" , font_size=24 )
__a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
gpu.move_to([-1, -1, 0] )
self.add(_lowercase )
__a : List[Any] = [mem.copy() for i in range(6 )]
__a : Any = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : Optional[Any] = Text("""Model""" , font_size=24 )
__a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.add(_lowercase )
__a : Tuple = []
__a : Tuple = []
__a : Optional[int] = []
for i, rect in enumerate(_lowercase ):
rect.set_stroke(_lowercase )
__a : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_lowercase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowercase , buff=0.0 )
self.add(_lowercase )
model_cpu_arr.append(_lowercase )
self.add(*_lowercase , *_lowercase , *_lowercase )
__a : Optional[Any] = [mem.copy() for i in range(6 )]
__a : Union[str, Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : Any = Text("""Loaded Checkpoint""" , font_size=24 )
__a : str = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(_lowercase )
__a : Dict = []
__a : int = []
for i, rect in enumerate(_lowercase ):
__a : List[str] = fill.copy().set_fill(_lowercase , opacity=0.7 )
target.move_to(_lowercase )
ckpt_arr.append(_lowercase )
__a : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_lowercase )
self.add(*_lowercase , *_lowercase )
__a : List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a : List[Any] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowercase , _lowercase )
__a : str = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowercase )
__a : Optional[int] = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
__a : List[Any] = [meta_mem.copy() for i in range(6 )]
__a : Optional[int] = [meta_mem.copy() for i in range(6 )]
__a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : List[str] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : Tuple = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
__a : Dict = Text("""Disk""" , font_size=24 )
__a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_lowercase , run_time=3 ) , Write(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) )
__a : Optional[Any] = []
for i, rect in enumerate(_lowercase ):
__a : List[str] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
self.play(*_lowercase )
self.play(FadeOut(_lowercase ) )
__a : List[str] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=3 ) )
self.play(
FadeOut(_lowercase , _lowercase , *_lowercase , *_lowercase ) , )
self.wait()
| 63 | 0 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
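
# Worked example (illustrative): "daBcd" matches "ABC" because 'a' and 'c' can be
# capitalized and the remaining lowercase 'd's deleted, while "dBcd" cannot supply an 'A'.
def _demo_abbr():
    assert abbr("daBcd", "ABC") is True
    assert abbr("dBcd", "ABC") is False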
| 76 | from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
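
# Usage sketch (illustrative): every value is counted into a "hole" indexed by
# value - min, then written back in hole order, which also preserves duplicates.
def _demo_pigeon_sort():
    assert pigeon_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]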
| 559 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
if temperature < 0:
raise Exception("""Temperature cannot be less than 0 K""" )
if molar_mass <= 0:
raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 703 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 54 | 0 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__: Optional[int] =self.feature_extraction_class(**self.feat_extract_dict)
lowerCamelCase__: Optional[Any] =feat_extract.model_input_names[0]
lowerCamelCase__: int =BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(_A) == len(_A) for x, y in zip(_A , processed_features[input_name])))
lowerCamelCase__: str =self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A)
lowerCamelCase__: Optional[Any] =BatchFeature({input_name: speech_inputs} , tensor_type="np")
lowerCamelCase__: str =processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowerCamelCase__: Dict =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A)
lowerCamelCase__: List[str] =self.feature_extraction_class(**self.feat_extract_dict)
lowerCamelCase__: Optional[int] =feat_extract.model_input_names[0]
lowerCamelCase__: Tuple =BatchFeature({input_name: speech_inputs} , tensor_type="pt")
lowerCamelCase__: str =processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowerCamelCase__: Any =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: str =self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A)
lowerCamelCase__: Optional[Any] =self.feature_extraction_class(**self.feat_extract_dict)
lowerCamelCase__: Union[str, Any] =feat_extract.model_input_names[0]
lowerCamelCase__: Optional[int] =BatchFeature({input_name: speech_inputs} , tensor_type="tf")
lowerCamelCase__: Union[str, Any] =processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowerCamelCase__: Optional[int] =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any]=False) ->Optional[int]:
'''simple docstring'''
def _inputs_have_equal_length(UpperCAmelCase_ : Any):
lowerCamelCase__: Dict =len(input[0])
for input_slice in input[1:]:
if len(_A) != length:
return False
return True
def _inputs_are_equal(UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]):
if len(_A) != len(_A):
return False
for input_slice_a, input_slice_a in zip(_A , _A):
if not np.allclose(np.asarray(_A) , np.asarray(_A) , atol=1E-3):
return False
return True
lowerCamelCase__: Optional[Any] =self.feature_extraction_class(**self.feat_extract_dict)
lowerCamelCase__: List[str] =self.feat_extract_tester.prepare_inputs_for_common(numpify=_A)
lowerCamelCase__: str =feat_extract.model_input_names[0]
lowerCamelCase__: Union[str, Any] =BatchFeature({input_name: speech_inputs})
lowerCamelCase__: Any =self.feat_extract_tester.seq_length_diff
lowerCamelCase__: Tuple =self.feat_extract_tester.max_seq_length + pad_diff
lowerCamelCase__: Optional[int] =self.feat_extract_tester.min_seq_length
lowerCamelCase__: List[str] =self.feat_extract_tester.batch_size
lowerCamelCase__: Tuple =self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCamelCase__: Any =feat_extract.pad(_A , padding=_A)
lowerCamelCase__: List[Any] =input_a[input_name]
lowerCamelCase__: int =feat_extract.pad(_A , padding="longest")
lowerCamelCase__: Optional[int] =input_a[input_name]
lowerCamelCase__: str =feat_extract.pad(_A , padding="max_length" , max_length=len(speech_inputs[-1]))
lowerCamelCase__: int =input_a[input_name]
lowerCamelCase__: List[Any] =feat_extract.pad(_A , padding="longest" , return_tensors="np")
lowerCamelCase__: Dict =input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_A):
feat_extract.pad(_A , padding="max_length")[input_name]
lowerCamelCase__: Any =feat_extract.pad(
_A , padding="max_length" , max_length=_A , return_tensors="np")
lowerCamelCase__: List[Any] =input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_A))
self.assertTrue(_inputs_have_equal_length(_A))
self.assertTrue(_inputs_have_equal_length(_A))
self.assertTrue(_inputs_are_equal(_A , _A))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCamelCase__: List[Any] =feat_extract.pad(_A , pad_to_multiple_of=10)
lowerCamelCase__: Tuple =input_a[input_name]
lowerCamelCase__: Tuple =feat_extract.pad(_A , padding="longest" , pad_to_multiple_of=10)
lowerCamelCase__: Union[str, Any] =input_a[input_name]
lowerCamelCase__: Any =feat_extract.pad(
_A , padding="max_length" , pad_to_multiple_of=10 , max_length=_A)
lowerCamelCase__: Dict =input_a[input_name]
lowerCamelCase__: Union[str, Any] =feat_extract.pad(
_A , padding="max_length" , pad_to_multiple_of=10 , max_length=_A , return_tensors="np" , )
lowerCamelCase__: Any =input_a[input_name]
self.assertTrue(all(len(_A) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(_A , _A))
lowerCamelCase__: Dict =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_A) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowerCamelCase__: Optional[int] =(np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[str]=False) ->str:
'''simple docstring'''
def _inputs_have_equal_length(UpperCAmelCase_ : int):
lowerCamelCase__: Dict =len(input[0])
for input_slice in input[1:]:
if len(_A) != length:
return False
return True
def _inputs_are_equal(UpperCAmelCase_ : str , UpperCAmelCase_ : Dict):
if len(_A) != len(_A):
return False
for input_slice_a, input_slice_a in zip(_A , _A):
if not np.allclose(np.asarray(_A) , np.asarray(_A) , atol=1E-3):
return False
return True
lowerCamelCase__: Dict =self.feature_extraction_class(**self.feat_extract_dict)
lowerCamelCase__: Union[str, Any] =self.feat_extract_tester.prepare_inputs_for_common(numpify=_A)
lowerCamelCase__: List[str] =feat_extract.model_input_names[0]
lowerCamelCase__: Optional[int] =BatchFeature({input_name: speech_inputs})
# truncate to smallest
lowerCamelCase__: List[str] =feat_extract.pad(
_A , padding="max_length" , max_length=len(speech_inputs[0]) , truncation=_A)
lowerCamelCase__: str =input_a[input_name]
lowerCamelCase__: Optional[Any] =feat_extract.pad(_A , padding="max_length" , max_length=len(speech_inputs[0]))
lowerCamelCase__: Tuple =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_A))
self.assertFalse(_inputs_have_equal_length(_A))
# truncate to smallest with np
lowerCamelCase__: str =feat_extract.pad(
_A , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np" , truncation=_A , )
lowerCamelCase__: Tuple =input_a[input_name]
lowerCamelCase__: List[Any] =feat_extract.pad(
_A , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np")
lowerCamelCase__: Union[str, Any] =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_A))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_A))
# truncate to middle
lowerCamelCase__: Any =feat_extract.pad(
_A , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=_A , return_tensors="np" , )
lowerCamelCase__: Optional[Any] =input_a[input_name]
lowerCamelCase__: Any =feat_extract.pad(
_A , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=_A)
lowerCamelCase__: Union[str, Any] =input_a[input_name]
lowerCamelCase__: List[Any] =feat_extract.pad(
_A , padding="max_length" , max_length=len(speech_inputs[1]) , return_tensors="np")
lowerCamelCase__: Dict =input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(_A))
self.assertTrue(_inputs_have_equal_length(_A))
self.assertTrue(_inputs_are_equal(_A , _A))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_A))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_A):
feat_extract.pad(_A , truncation=_A)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_A):
feat_extract.pad(_A , padding="longest" , truncation=_A)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_A):
feat_extract.pad(_A , padding="longest" , truncation=_A)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_A):
feat_extract.pad(_A , padding="max_length" , truncation=_A)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCamelCase__: Dict =12
lowerCamelCase__: str =feat_extract.pad(
_A , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=_A , truncation=_A , )
lowerCamelCase__: Optional[int] =input_a[input_name]
lowerCamelCase__: str =feat_extract.pad(
_A , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=_A , )
lowerCamelCase__: Optional[Any] =input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCamelCase__: Tuple =len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
lowerCamelCase__: Tuple =((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(_A))
self.assertFalse(_inputs_have_equal_length(_A))
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]:
'''simple docstring'''
self._check_padding(numpify=_A)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Tuple:
'''simple docstring'''
self._check_padding(numpify=_A)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Any:
'''simple docstring'''
self._check_truncation(numpify=_A)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
'''simple docstring'''
self._check_truncation(numpify=_A)
@require_torch
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.feature_extraction_class(**self.feat_extract_dict)
lowerCamelCase__: Tuple =self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__: Optional[Any] =feat_extract.model_input_names[0]
lowerCamelCase__: str =BatchFeature({input_name: speech_inputs})
lowerCamelCase__: Dict =feat_extract.pad(_A , padding="longest" , return_tensors="np")[input_name]
lowerCamelCase__: List[Any] =feat_extract.pad(_A , padding="longest" , return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)
@require_tf
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict:
'''simple docstring'''
lowerCamelCase__: Any =self.feature_extraction_class(**self.feat_extract_dict)
lowerCamelCase__: Optional[Any] =self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__: Any =feat_extract.model_input_names[0]
lowerCamelCase__: str =BatchFeature({input_name: speech_inputs})
lowerCamelCase__: Any =feat_extract.pad(_A , padding="longest" , return_tensors="np")[input_name]
lowerCamelCase__: List[Any] =feat_extract.pad(_A , padding="longest" , return_tensors="tf")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.feat_extract_dict
lowerCamelCase__: Union[str, Any] =True
lowerCamelCase__: str =self.feature_extraction_class(**_A)
lowerCamelCase__: int =self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__: List[Any] =[len(_A) for x in speech_inputs]
lowerCamelCase__: Union[str, Any] =feat_extract.model_input_names[0]
lowerCamelCase__: str =BatchFeature({input_name: speech_inputs})
lowerCamelCase__: List[str] =feat_extract.pad(_A , padding="longest" , return_tensors="np")
self.assertIn("attention_mask" , _A)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , _A)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.feat_extract_dict
lowerCamelCase__: Dict =True
lowerCamelCase__: str =self.feature_extraction_class(**_A)
lowerCamelCase__: Dict =self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__: List[str] =[len(_A) for x in speech_inputs]
lowerCamelCase__: List[str] =feat_extract.model_input_names[0]
lowerCamelCase__: Optional[int] =BatchFeature({input_name: speech_inputs})
lowerCamelCase__: Any =min(_A)
lowerCamelCase__: Union[str, Any] =feat_extract.pad(
_A , padding="max_length" , max_length=_A , truncation=_A , return_tensors="np")
self.assertIn("attention_mask" , _A)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
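# A minimal, standalone sketch of the padding behaviour the mixin above
# exercises (hedged: the concrete extractor class and sizes are illustrative,
# not taken from this file):
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
batch = fe.pad(
    {"input_values": [np.ones(800, dtype=np.float32), np.ones(1000, dtype=np.float32)]},
    padding="longest",
    return_tensors="np",
)
print(batch["input_values"].shape)  # (2, 1000): the shorter input is padded with 0.0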
| 59 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : int = 32
def bamb(x: int) -> int:
    # bytes -> megabytes
    return int(x / 2**20)


class TorchTracemalloc:
    def __enter__(self):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
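# Usage sketch for the context manager above (hedged; requires a CUDA device):
#   with TorchTracemalloc() as tracemalloc:
#       run_training_step(...)                      # any GPU work
#   print(tracemalloc.used, tracemalloc.peaked)     # delta / peak memory, in MB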
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
accelerator.print(
'''Total Peak Memory consumed during the train (max): {}'''.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"epoch-{epoch}"] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
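# Typical invocation (hedged; the file name is an assumption):
#   accelerate launch peak_memory_tracking.py --model_name_or_path bert-base-cased --num_epochs 1
# The script writes `peak_memory_utilization.json` into --output_dir on the main process.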
| 541 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 701 |
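# Standalone usage sketch for BlipProcessor, the class exercised by the tests
# above (hedged: the checkpoint name and prompt are illustrative):
#   from PIL import Image
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#   # -> dict with pixel_values, input_ids, attention_mask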
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
| 698 | 0 |
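# Example behaviour of quadratic_roots above: real roots come back as floats,
# complex roots as complex numbers.
#   quadratic_roots(a=1, b=-2, c=1) -> (1.0, 1.0)
#   quadratic_roots(a=1, b=0, c=1)  -> (1j, -1j)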
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type,
            block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 241 |
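# A self-contained sketch of the JIT-enabled / JIT-disabled comparison pattern
# used by test_jit_compilation above (hedged, minimal example):
import jax
import jax.numpy as jnp


@jax.jit
def f(x):
    return x * 2 + 1


jitted = f(jnp.ones(3))           # compiled with XLA
with jax.disable_jit():
    eager = f(jnp.ones(3))        # runs op-by-op, easier to debug
assert jitted.shape == eager.shape and bool((jitted == eager).all())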
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 241 | 1 |
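# Usage sketch for the Translation feature above (hedged, illustrative values):
#   from datasets import Dataset, Features
#   features = Features({"translation": Translation(languages=["en", "fr"])})
#   ds = Dataset.from_dict(
#       {"translation": [{"en": "the cat", "fr": "le chat"}]}, features=features
#   )
#   ds[0]["translation"]  # -> {"en": "the cat", "fr": "le chat"}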
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
with tf.device("""/CPU:0""" ):
SCREAMING_SNAKE_CASE__ = tf.train.load_checkpoint(args.tf_model_dir )
SCREAMING_SNAKE_CASE__ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
SCREAMING_SNAKE_CASE__ = reader.get_tensor(__snake_case ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
SCREAMING_SNAKE_CASE__ = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
SCREAMING_SNAKE_CASE__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.startswith("""model/moe""" ):
SCREAMING_SNAKE_CASE__ = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
SCREAMING_SNAKE_CASE__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.endswith("""/softmlp/kernel""" ):
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
SCREAMING_SNAKE_CASE__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
SCREAMING_SNAKE_CASE__ = key_name[-9:-7]
for i in range(16 ):
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
SCREAMING_SNAKE_CASE__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.startswith("""model/mlp""" ):
SCREAMING_SNAKE_CASE__ = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
SCREAMING_SNAKE_CASE__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.endswith("""/p1/bias""" ):
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
SCREAMING_SNAKE_CASE__ = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.endswith("""/p2/kernel""" ):
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
SCREAMING_SNAKE_CASE__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.endswith("""/p2/bias""" ):
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
SCREAMING_SNAKE_CASE__ = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.startswith("""model/ln""" ):
SCREAMING_SNAKE_CASE__ = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.feed_forward.norm.bias""" % player
SCREAMING_SNAKE_CASE__ = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.endswith("""/g""" ):
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.feed_forward.norm.weight""" % player
SCREAMING_SNAKE_CASE__ = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.startswith("""model/att""" ):
SCREAMING_SNAKE_CASE__ = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
SCREAMING_SNAKE_CASE__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
SCREAMING_SNAKE_CASE__ = state[:, 0, :, :]
SCREAMING_SNAKE_CASE__ = state[:, 1, :, :]
SCREAMING_SNAKE_CASE__ = state[:, 2, :, :]
SCREAMING_SNAKE_CASE__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.endswith("""/o/kernel""" ):
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
SCREAMING_SNAKE_CASE__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.startswith("""model/an""" ):
SCREAMING_SNAKE_CASE__ = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.self_attn.norm.bias""" % player
SCREAMING_SNAKE_CASE__ = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.endswith("""/g""" ):
SCREAMING_SNAKE_CASE__ = """model.blocks.%d.self_attn.norm.weight""" % player
SCREAMING_SNAKE_CASE__ = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
SCREAMING_SNAKE_CASE__ = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
SCREAMING_SNAKE_CASE__ = """model.%s.weight""" % nlayer
SCREAMING_SNAKE_CASE__ = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
if key_name.startswith("""model/wte""" ):
SCREAMING_SNAKE_CASE__ = """lm_head.weight"""
SCREAMING_SNAKE_CASE__ = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name.startswith("""model/wob""" ):
SCREAMING_SNAKE_CASE__ = """final_logits_bias"""
SCREAMING_SNAKE_CASE__ = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE__ = state.reshape((1, -1) )
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name == "model/dense/kernel":
SCREAMING_SNAKE_CASE__ = """model.last_project.weight"""
SCREAMING_SNAKE_CASE__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
elif key_name == "model/dense_1/bias":
SCREAMING_SNAKE_CASE__ = """model.last_project.bias"""
SCREAMING_SNAKE_CASE__ = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE__ = torch.tensor(__snake_case )
    torch.save(new_state, args.output)
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
A_ : List[Any] = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 712 |
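# Typical invocation of the converter above (hedged; the script file name is an
# assumption, the flags come from the argparse definition):
#   python convert_gptsan_tf_to_pt.py --tf_model_dir /path/to/tf_checkpoint --output gptsan.pt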
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
def __init__( self : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any]=1_3 , __UpperCAmelCase : Optional[Any]=3_0 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=3 , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Optional[int]=3_2 , __UpperCAmelCase : Optional[Any]=5 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : int=3_7 , __UpperCAmelCase : str="gelu" , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Optional[Any]=1_0 , __UpperCAmelCase : Dict=0.02 , __UpperCAmelCase : str=3 , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : str=2 , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = scope
SCREAMING_SNAKE_CASE__ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
SCREAMING_SNAKE_CASE__ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ = num_patches + 2
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = DeiTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = DeiTForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = DeiTForMaskedImageModeling(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ = DeiTForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = DeiTForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
pass
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any=False ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCAmelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCAmelCase ),
*get_values(__UpperCAmelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
SCREAMING_SNAKE_CASE__ = problem_type["""title"""]
SCREAMING_SNAKE_CASE__ = problem_type["""num_labels"""]
SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE__ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
SCREAMING_SNAKE_CASE__ = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCAmelCase ) as warning_list:
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = DeiTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ = inputs.pixel_values.to(__UpperCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
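    # A standalone sketch of the half-precision path exercised above, outside the test
    # harness. It assumes a CUDA device and `accelerate` installed; the helper name is
    # ours, and the explicit fp16 cast of the inputs is added so the sketch does not
    # rely on accelerate's input-casting hooks.
    def run_deit_fp16(image):
        processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        pixel_values = processor(images=image, return_tensors="pt").pixel_values
        with torch.no_grad():
            return model(pixel_values.to(model.device, dtype=torch.float16))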
| 616 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
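# User-side sketch of what this lazy __init__ enables (not part of this module): the
# heavy modeling files are only imported when first touched.
#
#     from transformers import EncoderDecoderModel  # resolved lazily via _LazyModule
#     model = EncoderDecoderModel.from_encoder_decoder_pretrained(
#         "bert-base-uncased",  # encoder initialized from a pretrained checkpoint
#         "bert-base-uncased",  # decoder from the same checkpoint, with cross-attention added
#     )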
| 74 |
'''simple docstring'''
from __future__ import annotations
class A__ :
def __init__( self :str , SCREAMING_SNAKE_CASE :int ) -> None:
'''simple docstring'''
_a : int =order
# a_{0} ... a_{k}
_a : Optional[Any] =[1.0] + [0.0] * order
# b_{0} ... b_{k}
_a : Tuple =[1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_a : List[Any] =[0.0] * self.order
# y[n-1] ... y[n-k]
_a : Tuple =[0.0] * self.order
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :list[float] , SCREAMING_SNAKE_CASE :list[float] ) -> None:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE ) < self.order:
_a : int =[1.0, *a_coeffs]
if len(SCREAMING_SNAKE_CASE ) != self.order + 1:
_a : int =(
f"Expected a_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}"
)
raise ValueError(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) != self.order + 1:
_a : Optional[Any] =(
f"Expected b_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}"
)
raise ValueError(SCREAMING_SNAKE_CASE )
_a : List[str] =a_coeffs
_a : Union[str, Any] =b_coeffs
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :float ) -> float:
'''simple docstring'''
_a : str =0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_a : Any =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_a : str =self.input_history[:-1]
_a : Optional[Any] =self.output_history[:-1]
_a : Optional[int] =sample
_a : Tuple =result
return result
| 694 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Tuple = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
_lowerCamelCase : str = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
_lowerCamelCase : List[Any] = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
_lowerCamelCase : Optional[int] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
_lowerCamelCase : List[Any] = OrderedDict(
[
        # Model for image classification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
_lowerCamelCase : Any = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
_lowerCamelCase : List[str] = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
_lowerCamelCase : Tuple = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
_lowerCamelCase : List[str] = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
_lowerCamelCase : List[str] = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
_lowerCamelCase : Any = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
_lowerCamelCase : str = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
_lowerCamelCase : Dict = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
_lowerCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_lowerCamelCase : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_lowerCamelCase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_lowerCamelCase : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_lowerCamelCase : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_lowerCamelCase : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_lowerCamelCase : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_lowerCamelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_lowerCamelCase : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_lowerCamelCase : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModel)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_lowerCamelCase : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_lowerCamelCase : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_lowerCamelCase : Union[str, Any] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCamelCase : List[str] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCamelCase : Optional[Any] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
lowercase_ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
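# User-side sketch of how the mappings above are consumed (not part of this module):
# the auto classes dispatch on the checkpoint's config type. Requires flax/jax; the
# checkpoint name is illustrative.
#
#     from transformers import FlaxAutoModel, FlaxAutoModelForSequenceClassification
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel
#     clf = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")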
| 703 |
# ideal gas law helpers; R = 0.0821 L·atm/(mol·K)
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
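# A quick sanity check of the helpers above: with PV = nRT, 1 mol at 300 K in 24.63 L
# exerts roughly 1 atm (values chosen so the rounded results are exact).
if __name__ == "__main__":
    assert moles_to_pressure(volume=24.63, moles=1, temperature=300) == 1
    assert moles_to_volume(pressure=1, moles=1, temperature=300) == 25
    assert pressure_and_volume_to_temperature(pressure=1, moles=1, volume=24.63) == 300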
| 516 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """simple docstring"""
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
    def __init__(self, feature_extractor, tokenizer) -> None:
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # the feature extractor pads spectrogram targets, which have num_mel_bins
                # channels rather than feature_size; temporarily swap the sizes while padding
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
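# User-side sketch (not part of this module); the keyword routing follows __call__ above.
# The checkpoint name is illustrative.
#
#     from transformers import SpeechT5Processor
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     inputs = processor(text="Hello world", return_tensors="pt")   # routed to the tokenizer
#     batch = processor(
#         text="Hello world", audio_target=waveform, sampling_rate=16000, return_tensors="pt",
#     )  # also attaches "labels" (and "decoder_attention_mask") for TTS training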
| 506 |
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    for base in range(2, 37):
        for num in range(1_000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
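# A few spot checks of the conversion; digits above 9 come from ALPHABET_VALUES
# (10 -> 'A', ..., 35 -> 'Z').
if __name__ == "__main__":
    assert decimal_to_any(255, 16) == "FF"
    assert decimal_to_any(35, 36) == "Z"
    assert decimal_to_any(5, 2) == "101"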
| 506 | 1 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCamelCase__ : int = 'pt'
elif is_tf_available():
UpperCamelCase__ : Optional[Any] = 'tf'
else:
UpperCamelCase__ : Dict = 'jax'
class _lowercase ( lowerCAmelCase ,unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = ByTaTokenizer
UpperCAmelCase_ : Union[str, Any] = False
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
UpperCAmelCase__ : Optional[int] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def lowerCAmelCase__ ( self ,**lowerCamelCase_ ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=False ,lowerCamelCase_=20 ,lowerCamelCase_=5 ) -> Tuple[str, list]:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
UpperCAmelCase__ : Any = tokenizer.decode([i] ,clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
UpperCAmelCase__ : str = list(filter(lambda lowerCamelCase_ : re.match(r'''^[ a-zA-Z]+$''' ,t[1] ) ,lowerCamelCase_ ) )
UpperCAmelCase__ : Tuple = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1] ,add_special_tokens=lowerCamelCase_ ) ,lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
UpperCAmelCase__ : List[str] = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
UpperCAmelCase__ : Union[str, Any] = toks + toks
# toks_str = [t[1] for t in toks]
UpperCAmelCase__ : int = [t[0] for t in toks]
# Ensure consistency
UpperCAmelCase__ : Optional[Any] = tokenizer.decode(lowerCamelCase_ ,clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
UpperCAmelCase__ : Dict = (
tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=lowerCamelCase_ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
UpperCAmelCase__ : Union[str, Any] = ''' ''' + output_txt
UpperCAmelCase__ : Dict = tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : str = self.ta_base_tokenizer
UpperCAmelCase__ : Any = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
UpperCAmelCase__ : List[Any] = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] ,batch_without_eos_added['''input_ids'''] )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.ta_base_tokenizer
UpperCAmelCase__ : Dict = '''Unicode €.'''
UpperCAmelCase__ : Tuple = tokenizer(lowerCamelCase_ )
UpperCAmelCase__ : int = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] ,lowerCamelCase_ )
# decoding
UpperCAmelCase__ : Optional[Any] = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ ,'''Unicode €.</s>''' )
UpperCAmelCase__ : Optional[Any] = tokenizer('''e è é ê ë''' )
UpperCAmelCase__ : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] ,lowerCamelCase_ )
# decoding
UpperCAmelCase__ : Tuple = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ ,'''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) ,'''e è é ê ë</s>''' )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ : Any = self.ta_base_tokenizer
UpperCAmelCase__ : str = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
UpperCAmelCase__ : Dict = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
UpperCAmelCase__ : List[Any] = tokenizer(lowerCamelCase_ ,padding=lowerCamelCase_ ,return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
if FRAMEWORK != "jax":
UpperCAmelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
UpperCAmelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
self.assertEqual((2, 37) ,batch.input_ids.shape )
self.assertEqual((2, 37) ,batch.attention_mask.shape )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.ta_base_tokenizer
UpperCAmelCase__ : int = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase__ : Any = tokenizer(lowerCamelCase_ ,padding=lowerCamelCase_ ,return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' ,lowerCamelCase_ )
self.assertIn('''attention_mask''' ,lowerCamelCase_ )
self.assertNotIn('''decoder_input_ids''' ,lowerCamelCase_ )
self.assertNotIn('''decoder_attention_mask''' ,lowerCamelCase_ )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.ta_base_tokenizer
UpperCAmelCase__ : List[Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
UpperCAmelCase__ : Any = tokenizer(
text_target=lowerCamelCase_ ,max_length=32 ,padding='''max_length''' ,truncation=lowerCamelCase_ ,return_tensors=lowerCamelCase_ )
self.assertEqual(32 ,targets['''input_ids'''].shape[1] )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.ta_base_tokenizer
UpperCAmelCase__ : Union[str, Any] = ['''A long paragraph for summarization. </s>''']
UpperCAmelCase__ : Any = ['''Summary of the text. </s>''']
# fmt: off
UpperCAmelCase__ : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
UpperCAmelCase__ : List[str] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
UpperCAmelCase__ : Union[str, Any] = tokenizer(lowerCamelCase_ ,text_target=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ ,batch['''input_ids'''][0] )
self.assertEqual(lowerCamelCase_ ,batch['''labels'''][0] )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
UpperCAmelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase__ : List[Any] = ''' He is very happy, UNwant\u00E9d,running'''
UpperCAmelCase__ : Any = tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase__ : Any = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = after_tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
UpperCAmelCase__ : Optional[int] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase__ : List[str] = tempfile.mkdtemp()
UpperCAmelCase__ : int = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
UpperCAmelCase__ : Dict = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
UpperCAmelCase__ : List[str] = tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
UpperCAmelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
UpperCAmelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
self.assertIn('''new_additional_special_token''' ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
UpperCAmelCase__ : Optional[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_ ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(lowerCamelCase_ )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase__ : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ ,'''special_tokens_map.json''' ) ,encoding='''utf-8''' ) as json_file:
UpperCAmelCase__ : List[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ ,'''tokenizer_config.json''' ) ,encoding='''utf-8''' ) as json_file:
UpperCAmelCase__ : Tuple = json.load(lowerCamelCase_ )
UpperCAmelCase__ : Optional[int] = [f'''<extra_id_{i}>''' for i in range(125 )]
UpperCAmelCase__ : Any = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
UpperCAmelCase__ : str = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCamelCase_ ,'''special_tokens_map.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(lowerCamelCase_ ,lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ ,'''tokenizer_config.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(lowerCamelCase_ ,lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase__ : str = tokenizer_class.from_pretrained(
lowerCamelCase_ ,)
self.assertIn(
'''an_additional_special_token''' ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) ,)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase__ : Optional[int] = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' ,lstrip=lowerCamelCase_ )]
UpperCAmelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_ ,additional_special_tokens=lowerCamelCase_ ,)
self.assertIn('''a_new_additional_special_token''' ,tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] ,tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) ,)
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase__ : Any = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
UpperCAmelCase__ : int = tokenizer_class.from_pretrained(lowerCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '''''' )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.get_tokenizers(fast=lowerCamelCase_ ,do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase__ : List[Any] = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
UpperCAmelCase__ : List[Any] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase__ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase__ : Tuple = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : List[str] = tokenizer.convert_ids_to_tokens(
lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
for attr in attributes_list:
setattr(lowerCamelCase_ ,attr + '''_id''' ,lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ ,lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ ,attr + '''_id''' ) ,lowerCamelCase_ )
setattr(lowerCamelCase_ ,attr + '''_id''' ,lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ ,lowerCamelCase_ ) ,lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ ,attr + '''_id''' ) ,lowerCamelCase_ )
setattr(lowerCamelCase_ ,'''additional_special_tokens_ids''' ,[] )
self.assertListEqual(getattr(lowerCamelCase_ ,'''additional_special_tokens''' ) ,[] )
self.assertListEqual(getattr(lowerCamelCase_ ,'''additional_special_tokens_ids''' ) ,[] )
setattr(lowerCamelCase_ ,'''additional_special_tokens_ids''' ,[token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCamelCase_ ,'''additional_special_tokens''' ) ,[token_to_test_setters] )
self.assertListEqual(getattr(lowerCamelCase_ ,'''additional_special_tokens_ids''' ) ,[token_id_to_test_setters] )
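# A brief sketch of the byte-level scheme these tests exercise: ByT5 has no learned
# vocabulary; each UTF-8 byte is mapped to (byte value + 3) to leave room for the
# special ids pad=0, </s>=1, <unk>=2 — which is why 'U' (byte 85) shows up as id 88
# in the expected values above.
from transformers import ByT5Tokenizer
byt5_tok = ByT5Tokenizer()
byte_ids = byt5_tok("Unicode €.").input_ids
assert byte_ids[0] == ord("U") + 3  # 88
assert byte_ids[-1] == 1            # </s> appended automatically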
| 703 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 496 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 638 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print('''\n********Press N to stop entering at any point of time********\n''')
    check = input('''Enter the value of the root node: ''').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f'Enter the left node of {node_found.data}: '
        check = input(msg).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f'Enter the right node of {node_found.data}: '
        check = input(msg).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("build_tree exited its loop without returning a tree")
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=''',''')
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=''',''')
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=''',''')
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=''',''')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=''',''')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=''',''')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=''',''')
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=''',''')
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(prompt("Binary Tree Traversals"))
    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")
    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")
    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")
    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")
    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 5_0 + "\n")
    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")
    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")
    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
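# The same traversals on a hand-built three-node tree 1 -> (2, 3), without the
# input() prompts (the interactive demo above still runs first when executed as a script).
if __name__ == "__main__":
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    pre_order(root)    # prints: 1,2,3,
    print()
    in_order(root)     # prints: 2,1,3,
    print()
    post_order(root)   # prints: 2,3,1,
    print()
    level_order(root)  # prints: 1,2,3,
    print()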
| 638 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_UpperCamelCase : Dict = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
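# User-side sketch (not part of this module): constructing the old class still works
# but emits the FutureWarning above.
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         PoolFormerFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)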
| 705 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class snake_case__ :
def __init__( self : List[str] ) -> Tuple:
UpperCAmelCase_ : Dict = {}
def A ( self : str , _A : str ) -> None:
UpperCAmelCase_ : Union[str, Any] = {}
def A ( self : Dict , _A : str , _A : str , _A : float ) -> None:
if nodea not in self.connections:
self.add_node(_A )
if nodea not in self.connections:
self.add_node(_A )
UpperCAmelCase_ : Optional[Any] = probability
def A ( self : Tuple ) -> list[str]:
return list(self.connections )
def A ( self : Optional[int] , _A : str ) -> str:
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : str = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __UpperCAmelCase ( A : str , A : list[tuple[str, str, float]] , A : int ) -> dict[str, int]:
UpperCAmelCase_ : int = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(A , A , A )
UpperCAmelCase_ : List[str] = Counter(graph.get_nodes() )
UpperCAmelCase_ : int = start
for _ in range(A ):
UpperCAmelCase_ : Union[str, Any] = graph.transition(A )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
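# A tiny deterministic sketch of get_transitions: a two-state chain where 'a' always
# hops to 'b' and 'b' always hops back, so 1000 steps visit each state 500 times
# (plus the initial count of 1 per node contributed by Counter(get_nodes())).
if __name__ == "__main__":
    walk = get_transitions("a", [("a", "b", 1.0), ("b", "a", 1.0)], 1000)
    print(walk)  # Counter({'a': 501, 'b': 501})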
| 216 | 0 |
'''simple docstring'''
def _print_dist(dist, v):
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()
def floyd_warshall(graph, v):
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
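# A scripted version of the commented example above (note the interactive prompts
# still run first when this file is executed directly):
if __name__ == "__main__":
    INF = float("inf")
    example = [
        [0.0, INF, INF],
        [INF, 0.0, 2.0],
        [INF, 1.0, 0.0],
    ]
    floyd_warshall(example, 3)  # prints the 0/INF/2/1 matrix shown above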
| 284 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class _snake_case ( datasets.BuilderConfig ):
SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None
class _snake_case ( datasets.ArrowBasedBuilder ):
SCREAMING_SNAKE_CASE : Tuple = PandasConfig
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_SCREAMING_SNAKE_CASE , (str, list, tuple) ):
lowerCAmelCase = data_files
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCAmelCase = [dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
lowerCAmelCase = []
for split_name, files in data_files.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCAmelCase = [dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) for file in files]
splits.append(datasets.SplitGenerator(name=_SCREAMING_SNAKE_CASE , gen_kwargs={'files': files} ) )
return splits
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCAmelCase = table_cast(_SCREAMING_SNAKE_CASE , self.config.features.arrow_schema )
return pa_table
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(_SCREAMING_SNAKE_CASE ) ):
with open(_SCREAMING_SNAKE_CASE , 'rb' ) as f:
lowerCAmelCase = pa.Table.from_pandas(pd.read_pickle(_SCREAMING_SNAKE_CASE ) )
yield i, self._cast_table(_SCREAMING_SNAKE_CASE )
| 284 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (images are converted to RGB, so one channel is dropped)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
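# A short aside on the shape checks above (stated as an assumption about the
# flattened-patch layout): each patch row stores its row index, its column
# index, and the flattened pixels, hence
#   hidden_dim = patch_h * patch_w * channels + 2
# e.g. 16 * 16 * 3 + 2 == 770 for the default tester settings.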
| 709 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            new_state_dict["luke." + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
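# Hypothetical invocation (the script and file names below are placeholders
# for illustration):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke/pytorch_model.bin \
#       --metadata_path mluke/metadata.json \
#       --entity_vocab_path mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base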
| 340 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
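# A brief usage sketch (illustrative): instantiate the configuration with its
# defaults and override the upscale factor, as one might for a 4x model.
#
#   config = Swin2SRConfig(upscale=4)
#   print(config.model_type, config.num_layers, config.upscale)  # swin2sr 6 4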
| 65 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
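# A tiny illustration (standalone; the ids are assumed from the fairseq
# mapping above) of the single- vs. pair-sequence layout produced by
# `build_inputs_with_special_tokens`: one [SEP] (id 2) closes each segment.
#
#   tokens_a, tokens_b = [101, 102, 103], [201, 202]
#   tokens_a + [2]                    # -> [101, 102, 103, 2]
#   tokens_a + [2] + tokens_b + [2]   # -> [101, 102, 103, 2, 201, 202, 2]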
| 464 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
return hidden_states
| 700 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
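# A small illustration (assumed requirement strings) of the `require_version`
# contract used above: it raises if the installed package violates the spec.
#
#   require_version("tqdm>=4.27")  # passes when tqdm is recent enough
#   require_version("numpy<1.0")   # raises on any modern numpy install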
| 474 | 0 |
def text_justification(word: str, max_width: int) -> list[str]:
    """Split ``word`` into lines that are each exactly ``max_width`` wide."""
    words = word.split()

    def justify(line: list[str], width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
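# A quick worked example (illustrative):
#
#   text_justification("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']
#
# The 8 leftover spaces on the first line split 4/4 between the two gaps, and
# the 3 leftover spaces on the second line go 2/1, filling from the left.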
| 62 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                       negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                       output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
                          guidance_scale=guidance_scale, negative_prompt=negative_prompt,
                          num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator,
                          latents=latents, output_type=output_type, return_dict=return_dict,
                          callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                       negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                       output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
                          guidance_scale=guidance_scale, negative_prompt=negative_prompt,
                          num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator,
                          latents=latents, output_type=output_type, return_dict=return_dict,
                          callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                       negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                       output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
                          guidance_scale=guidance_scale, negative_prompt=negative_prompt,
                          num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator,
                          latents=latents, output_type=output_type, return_dict=return_dict,
                          callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                       negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                       output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
                          guidance_scale=guidance_scale, negative_prompt=negative_prompt,
                          num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator,
                          latents=latents, output_type=output_type, return_dict=return_dict,
                          callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                 negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                 output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width,
                                   num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
                                   negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt,
                                   eta=eta, generator=generator, latents=latents, output_type=output_type,
                                   return_dict=return_dict, callback=callback, callback_steps=callback_steps,
                                   **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width,
                                   num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
                                   negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt,
                                   eta=eta, generator=generator, latents=latents, output_type=output_type,
                                   return_dict=return_dict, callback=callback, callback_steps=callback_steps,
                                   **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width,
                                   num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
                                   negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt,
                                   eta=eta, generator=generator, latents=latents, output_type=output_type,
                                   return_dict=return_dict, callback=callback, callback_steps=callback_steps,
                                   **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width,
                                   num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
                                   negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt,
                                   eta=eta, generator=generator, latents=latents, output_type=output_type,
                                   return_dict=return_dict, callback=callback, callback_steps=callback_steps,
                                   **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
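# Hypothetical usage sketch (the custom-pipeline id below is an assumption for
# illustration, and running this downloads four full checkpoints):
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   output = pipe(prompt="an astronaut riding a horse")
#   images = output.images  # one image per checkpoint, v1.1 through v1.4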
| 502 | 0 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method in concrete algorithms
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")

        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
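    # Sanity check: the only augmenting path in the demo graph is
    # 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, so the bottleneck
    # (and therefore the printed maximum flow) is 6.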
| 96 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 96 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama_fast'] = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_llama'] = [
        'LlamaForCausalLM',
        'LlamaModel',
        'LlamaPreTrainedModel',
        'LlamaForSequenceClassification',
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
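# With the lazy structure above, importing the package is cheap: submodules are
# materialized only on first attribute access. Illustrative check (assumes
# transformers is installed with torch available):
# >>> from transformers.models import llama
# >>> llama.LlamaConfig        # resolves configuration_llama on demand
# >>> llama.LlamaForCausalLM   # triggers the torch-backed modeling_llama import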
| 464 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Streaming: build an iterable dataset without downloading everything up front
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Otherwise build a regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
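# Usage sketch: this reader is what backs `Dataset.from_text` /
# `load_dataset("text", ...)`. Illustrative call (hypothetical file path):
# >>> ds = TextDatasetReader("corpus.txt", split=NamedSplit("train")).read()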
| 464 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print('''Googling.....''')
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
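# Note: the first selector targets one Google results layout (div class
# "yuRUbf"); the AttributeError fallback parses the alternative "kCrYT"
# layout, recovering the target URL from the redirect link's query string.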
| 189 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
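# Schedulers sharing SchedulerMixin are interchangeable at inference time via
# their config. Sketch (assumes diffusers and torch are installed; the model id
# is illustrative):
# >>> from diffusers import DiffusionPipeline, EulerDiscreteScheduler
# >>> pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# >>> pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)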
| 189 | 1 |
"""simple docstring"""
__all__ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 88 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 461 | 0 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Returns every combination of words from word_bank that concatenates to target.
    """
    word_bank = word_bank or []
    # create a table: table[i] holds all ways to build the prefix target[:i]
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value: the empty prefix has exactly one (empty) construction
    table[0] = [[]]
    # iterate through the indices
    for i in range(table_size):
        # only positions that are reachable can be extended
        if table[i] != []:
            for word in word_bank:
                # slice condition: does this word start at position i?
                if target[i : i + len(word)] == word:
                    # prepend the word to every combination reaching position i ...
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # ... and push those combinations to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are built in reverse order, so reverse for readable output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
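# Complexity note: the table scan itself costs
# O(len(target) * len(word_bank) * max_word_len) slice comparisons, but copying
# combinations can dominate, since the number of valid constructions may grow
# exponentially with the target length.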
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 706 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
    # Reuse the text-to-image pipeline's components to build the image-variation pipeline
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
| 548 | 0 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    """Build a tiny dataset with two near-duplicate files and one distinct file."""
    data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 86 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text between two prompts in a file; return it with its line span."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    # Trim surrounding blank lines.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the markdown list of model links for a task guide."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally fix) the auto-generated model list in a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 86 | 1 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"""
_DESCRIPTION = """\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year\'s competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"""
_KWARGS_DESCRIPTION = """\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        # turn column lists into one row dict per example
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
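# The dict/zip idiom in _compute turns column lists into per-example rows:
# {"src": [s1, s2], "mt": [m1, m2], "ref": [r1, r2]} becomes
# [{"src": s1, "mt": m1, "ref": r1}, {"src": s2, "mt": m2, "ref": r2}],
# which is the record format comet's scorer.predict expects.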
| 702 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix=""):
    """Create a fresh temp-file path with the given suffix."""
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16_000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 418 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    """A search node on the grid, ordered by total estimated cost f = g + h."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            # Manhattan distance
            return abs(dx) + abs(dy)
        else:
            # Euclidean distance
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start, goal) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open nodes are sorted using __lt__ (lowest f_cost first)
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # keep whichever copy of the node has the cheaper path so far
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the valid neighbour nodes of a grid position."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent pointers back to the start and return the path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
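# Note on the heuristics: on a 4-connected grid with unit move cost, the
# Manhattan distance equals the relaxed (obstacle-free) path length, and the
# Euclidean distance is never larger than it, so both are admissible and
# search() returns an optimal path.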
class BidirectionalAStar:
    def __init__(self, start, goal) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # each frontier now aims at the other frontier's best node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # keep whichever copy of the node has the cheaper path so far
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_a_star = BidirectionalAStar(init, goal)
    bd_path = bidir_a_star.search()  # run the search so the timing is meaningful
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 294 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
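# Minimal usage sketch (assumes transformers is installed):
# >>> config = RobertaConfig(num_hidden_layers=6)  # smaller variant for experiments
# >>> config.model_type
# 'roberta'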
| 294 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
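# Priority exercised by the tests above: an explicit framework argument wins,
# then the checkpoint's on-disk format, then whichever of torch/TF is
# importable (PyTorch preferred when both are available).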
| 214 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class SimpleImageProcessor(BaseImageProcessor):
    # NOTE: the class name is a placeholder; the model this processor belonged
    # to is not recoverable from this snippet.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
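# Processing order in preprocess() mirrors the usual torchvision-style eval
# pipeline: resize shortest edge -> center crop -> rescale to [0, 1] ->
# normalize -> convert to channels-first for the model.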
| 214 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # Bernstein basis polynomial for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
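# The basis functions above are the Bernstein polynomials
#   b_{i,n}(t) = C(n, i) * (1 - t)**(n - i) * t**i,
# which are non-negative on [0, 1] and sum to 1, so every curve point is a
# convex combination of the control points and the curve stays in their hull.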
| 529 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__lowerCAmelCase : Dict = "CompVis/stable-diffusion-v1-1"
__lowerCAmelCase : int = "CompVis/stable-diffusion-v1-2"
__lowerCAmelCase : int = "CompVis/stable-diffusion-v1-3"
__lowerCAmelCase : Union[str, Any] = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        # the fourth pipeline is assembled from the components passed in
        self.pipe4 = StableDiffusionPipeline(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker)
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
@property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
@torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
@torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
@torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
@torch.no_grad()
    def _compare(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Collect the first image from each checkpoint into a single output
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
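# Usage sketch (community pipeline; assumes the four checkpoints can be
# downloaded and this file is registered as a custom pipeline):
# >>> pipe = DiffusionPipeline.from_pretrained(
# ...     "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison")
# >>> out = pipe._compare("an astronaut riding a horse", num_inference_steps=25)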
| 644 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
lowercase__ = "\nHuman: <<task>>\n\nAssistant: "
lowercase__ = "huggingface-tools/default-prompts"
lowercase__ = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : str="run" ) -> Optional[Any]:
'''simple docstring'''
if prompt_or_repo_id is None:
_a = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , __lowerCamelCase ) is not None:
return prompt_or_repo_id
_a = cached_file(
__lowerCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as f:
return f.read()
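# Behavior sketch: `download_prompt(None, "my-agent")` fetches
# run_prompt_template.txt from the default prompts dataset repo, while
# `download_prompt("Answer briefly: <<task>>", "my-agent")` is returned
# verbatim because it contains whitespace.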
| 276 |
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...
    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []
    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int) -> bytes:
        with open(path, "rb") as f:
            return f.read(magic_number_length)
    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)
    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))
        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)
        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)
        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]
    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile
        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr
        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame
        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length(cls) -> int:
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers)
    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int) -> bytes:
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.", category=FutureWarning, )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, extractor: Optional[BaseExtractor] = "deprecated", ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.", category=FutureWarning, )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.", category=FutureWarning, )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
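# A hypothetical end-to-end sketch of the utilities above; the archive path is
# illustrative and assumed to exist locally.
if __name__ == "__main__":
    _manager = ExtractManager(cache_dir="/tmp/datasets_cache")
    _archive = "/tmp/downloads/corpus.tar.gz"
    print(Extractor.infer_extractor_format(_archive))  # e.g. "gzip", read from the magic number
    print(_manager.extract(_archive))  # extracts on first call, then returns the cached path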
| 276 | 1 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__a = """sshleifer/bart-tiny-random"""
__a = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __lowercase ( unittest.TestCase ):
@cached_property
def _lowercase ( self : Optional[int] ) -> str:
"""simple docstring"""
return AutoConfig.from_pretrained(__lowerCamelCase )
def _lowercase ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase , *UpperCAmelCase = create_student_by_copying_alternating_layers(__lowerCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowercase ( self : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase , *UpperCAmelCase = create_student_by_copying_alternating_layers(__lowerCamelCase , tempfile.mkdtemp() , e=1 , d=__lowerCamelCase )
def _lowercase ( self : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase , *UpperCAmelCase = create_student_by_copying_alternating_layers(__lowerCamelCase , tempfile.mkdtemp() , e=1 , d=__lowerCamelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowercase ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase , *UpperCAmelCase = create_student_by_copying_alternating_layers(__lowerCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(__lowerCamelCase ):
create_student_by_copying_alternating_layers(__lowerCamelCase , tempfile.mkdtemp() , e=__lowerCamelCase , d=__lowerCamelCase )
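# Hypothetical sketch of the helper under test; checkpoint and layer counts are illustrative:
#   student, *layer_maps = create_student_by_copying_alternating_layers(
#       TINY_BART, tempfile.mkdtemp(), e=1, d=1
#   )
#   student.save_pretrained("my-tiny-student")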
| 377 |
def _UpperCamelCase ( ) ->list[list[int]]:
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
__a = generate_large_matrix()
__a = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _UpperCamelCase ( lowerCAmelCase_ ) ->None:
assert all(row == sorted(lowerCAmelCase_ , reverse=lowerCAmelCase_ ) for row in grid )
assert all(list(lowerCAmelCase_ ) == sorted(lowerCAmelCase_ , reverse=lowerCAmelCase_ ) for col in zip(*lowerCAmelCase_ ) )
def _UpperCamelCase ( lowerCAmelCase_ ) ->int:
UpperCAmelCase = 0
UpperCAmelCase = len(lowerCAmelCase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCAmelCase = (left + right) // 2
UpperCAmelCase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCAmelCase = mid + 1
else:
UpperCAmelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowerCAmelCase_ )
def _UpperCamelCase ( lowerCAmelCase_ ) ->int:
UpperCAmelCase = 0
UpperCAmelCase = len(grid[0] )
for i in range(len(lowerCAmelCase_ ) ):
UpperCAmelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowerCAmelCase_ ) * len(grid[0] )) - total
def _UpperCamelCase ( lowerCAmelCase_ ) ->int:
return len([number for row in grid for number in row if number < 0] )
def _UpperCamelCase ( lowerCAmelCase_ ) ->int:
UpperCAmelCase = 0
for row in grid:
for i, number in enumerate(lowerCAmelCase_ ):
if number < 0:
total += len(lowerCAmelCase_ ) - i
break
return total
def _UpperCamelCase ( ) ->None:
from timeit import timeit
print("""Running benchmarks""" )
UpperCAmelCase = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
UpperCAmelCase = timeit(F"""{func}(grid=grid)""" , setup=lowerCAmelCase_ , number=5_0_0 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
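    # Illustrative sanity check (not from the source): the 4x4 test grid above
    # holds exactly 8 negative numbers, so both counters should agree on it.
    assert count_negatives_binary_search(test_grids[0]) == 8
    assert count_negatives_brute_force(test_grids[0]) == 8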
| 377 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : int = logging.get_logger(__name__)
UpperCamelCase__ : str = '''▁'''
UpperCamelCase__ : str = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCamelCase__ : Optional[int] = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
UpperCamelCase__ : Any = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
UpperCamelCase__ : List[Any] = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : str = VOCAB_FILES_NAMES
__a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__a : int = ["input_ids", "attention_mask"]
__a : List[int] = []
__a : List[int] = []
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__=None ,snake_case__=None ,snake_case__="<s>" ,snake_case__="</s>" ,snake_case__="</s>" ,snake_case__="<pad>" ,snake_case__="<unk>" ,snake_case__="m2m100" ,snake_case__ = None ,snake_case__=8 ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE_ : int = language_codes
SCREAMING_SNAKE_CASE_ : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
SCREAMING_SNAKE_CASE_ : List[str] = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('additional_special_tokens' ,[] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case__ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case__ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case__ ,tgt_lang=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,sep_token=snake_case__ ,unk_token=snake_case__ ,pad_token=snake_case__ ,language_codes=snake_case__ ,sp_model_kwargs=self.sp_model_kwargs ,num_madeup_words=snake_case__ ,**snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Dict = vocab_file
SCREAMING_SNAKE_CASE_ : int = load_json(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : int = spm_file
SCREAMING_SNAKE_CASE_ : List[str] = load_spm(snake_case__ ,self.sp_model_kwargs )
SCREAMING_SNAKE_CASE_ : List[str] = len(self.encoder )
SCREAMING_SNAKE_CASE_ : Optional[int] = {
self.get_lang_token(snake_case__ ): self.encoder_size + i for i, lang_code in enumerate(snake_case__ )
}
SCREAMING_SNAKE_CASE_ : Dict = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case__ )}
SCREAMING_SNAKE_CASE_ : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
SCREAMING_SNAKE_CASE_ : int = src_lang if src_lang is not None else 'en'
SCREAMING_SNAKE_CASE_ : Optional[int] = tgt_lang
SCREAMING_SNAKE_CASE_ : Dict = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
SCREAMING_SNAKE_CASE_ : Dict = num_madeup_words
@property
def snake_case ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def snake_case ( self ):
return self._src_lang
@src_lang.setter
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case ( self ,snake_case__ ):
return self.sp_model.encode(snake_case__ ,out_type=snake_case__ )
def snake_case ( self ,snake_case__ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case__ ,self.encoder[self.unk_token] )
def snake_case ( self ,snake_case__ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case__ ,self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ ,token_ids_a=snake_case__ ,already_has_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE_ : Tuple = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case__ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case__ )) + ([0] * len(snake_case__ )) + suffix_ones
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file ,self.sp_model_kwargs )
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
        save_dir = Path(snake_case__ )
        if not save_dir.is_dir():
            raise OSError(f'{save_dir} should be a directory' )
SCREAMING_SNAKE_CASE_ : List[Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder ,snake_case__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file ,snake_case__ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case__ ,'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (str(snake_case__ ), str(snake_case__ ))
def snake_case ( self ,snake_case__ ,snake_case__ = "en" ,snake_case__ = None ,snake_case__ = "ro" ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = src_lang
SCREAMING_SNAKE_CASE_ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(snake_case__ ,snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
SCREAMING_SNAKE_CASE_ : Optional[int] = src_lang
SCREAMING_SNAKE_CASE_ : Optional[Any] = self(snake_case__ ,add_special_tokens=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.get_lang_id(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = tgt_lang_id
return inputs
    def _switch_to_input_mode( self ):
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang ):
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self , lang ):
        return self.lang_code_to_token[lang]
    def get_lang_id( self , lang ):
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = sentencepiece.SentencePieceProcessor(**lowerCamelCase_ )
spm.Load(str(lowerCamelCase_ ) )
return spm
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> Union[Dict, List]:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' ) as f:
return json.load(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str ) -> None:
"""simple docstring"""
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=2 )
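# A minimal, illustrative translation sketch assuming the standard transformers
# class names (M2M100Tokenizer / M2M100ForConditionalGeneration); the sample
# sentence is made up.
if __name__ == "__main__":
    from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    batch = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
    generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))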
| 685 |
import qiskit
def __UpperCAmelCase ( lowerCamelCase_ : int = 2 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = qubits
# Using Aer's simulator
SCREAMING_SNAKE_CASE_ : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE_ : str = qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , lowerCamelCase_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , lowerCamelCase_ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowerCamelCase_ ) ) , list(range(lowerCamelCase_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 | 1 |
from math import isqrt
def is_prime( number: int ) -> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution( max_prime: int = 10**6 ) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
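# Illustrative check (not from the source): the candidates below 100 are
# 7, 19, 37, 61 and 91, of which all but 91 are prime, so solution(100) is 4.
assert solution(100) == 4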
if __name__ == "__main__":
print(f'{solution() = }') | 16 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
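# Illustrative consumer of the lazy module: light symbols resolve without pulling in
# torch, and the modeling classes above are only imported on first attribute access.
#   from transformers import BloomConfig
#   config = BloomConfig(n_layer=2, hidden_size=64)  # hypothetical toy sizes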
| 387 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = MgpstrTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = {}
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
# fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
    def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def _lowercase (self ):
"""simple docstring"""
pass
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
SCREAMING_SNAKE_CASE_ = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
SCREAMING_SNAKE_CASE_ = tokenizer.encode([special_token] , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertTrue(special_token not in decoded )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(len(SCREAMING_SNAKE_CASE_ ) , 0 )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , SCREAMING_SNAKE_CASE_ )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def _lowercase (self ):
"""simple docstring"""
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def _lowercase (self ):
"""simple docstring"""
        pass
| 628 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( __lowercase , unittest.TestCase ):
    UpperCAmelCase__ = KandinskyV22ControlnetPipeline
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ = False
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowercase (self ):
"""simple docstring"""
return 1_00
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNet2DConditionModel(**SCREAMING_SNAKE_CASE_ )
        return model
@property
def _lowercase (self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase (self ):
"""simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE_ )
# create hint
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''cpu'''
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ = np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
        expected_image = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
        hint = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = 'A robot, 4k photo'
        generator = torch.Generator(device='cuda' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        generator = torch.Generator(device='cuda' ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , output_type='np' , )
        image = output.images[0]
assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(image , expected_image )
| 628 | 1 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix) -> Matrix:
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row , size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1 , size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1 , size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col , size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector)
    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))
    return interpolated_func
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function , order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1 , order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1 , order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
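# Illustrative check using the worked example from the problem statement: for
# u(n) = n**3 the first incorrect terms of the fitted polynomials are 1, 15 and 58,
# which sum to 74.
assert solution(lambda n: n**3 , 3) == 74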
if __name__ == "__main__":
print(F'''{solution() = }''') | 25 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__SCREAMING_SNAKE_CASE = False
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
return 12
@property
def SCREAMING_SNAKE_CASE_ ( self :int ):
return 12
@property
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model
@property
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
@property
def SCREAMING_SNAKE_CASE_ ( self :str ):
torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs )
        return model
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
_a = "cpu"
_a = self.dummy_vqvae
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_transformer
_a = VQDiffusionScheduler(self.num_embed )
_a = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase__ )
_a = VQDiffusionPipeline(
vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )
_a = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = "teddy bear playing in the pool"
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" )
_a = output.images
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe(
[prompt] , generator=UpperCamelCase__ , output_type="np" , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_a = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
_a = "cpu"
_a = self.dummy_vqvae
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_transformer
_a = VQDiffusionScheduler(self.num_embed )
_a = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
_a = VQDiffusionPipeline(
vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )
_a = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = "teddy bear playing in the pool"
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" )
_a = output.images
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe(
[prompt] , generator=UpperCamelCase__ , output_type="np" , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_a = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
_a = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
_a = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=UpperCamelCase__ , output_type="np" , )
_a = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 388 | 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->bool:
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return False
_lowerCamelCase : Dict = len(SCREAMING_SNAKE_CASE_ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , SCREAMING_SNAKE_CASE_ )
else:
return binary_search(a_list[midpoint + 1 :] , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] =input('Enter numbers separated by comma:\n').strip()
SCREAMING_SNAKE_CASE__ : Optional[int] =[int(item.strip()) for item in user_input.split(',')]
SCREAMING_SNAKE_CASE__ : str =int(input('Enter the number to be found in the list:\n').strip())
SCREAMING_SNAKE_CASE__ : str ='' if binary_search(sequence, target) else 'not '
print(F"""{target} was {not_str}found in {sequence}""")
| 714 | """simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
def a__ ( self ) -> Dict:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : List[Any] = None
if self.use_attention_mask:
_lowerCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[Any] = None
if self.use_token_type_ids:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ ( self ) -> int:
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
__snake_case = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ ( self ) -> Any:
_lowerCamelCase : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def a__ ( self ) -> int:
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained('''albert-base-v2''' )
_lowerCamelCase : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class FlaxAlbertModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Tuple = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
_lowerCamelCase : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : str = model(_lowercase , attention_mask=_lowercase )[0]
_lowerCamelCase : List[str] = (1, 11, 768)
self.assertEqual(output.shape , _lowercase )
_lowerCamelCase : Dict = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1E-4 ) )
| 558 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowercase_ , unittest.TestCase ):
lowercase = DiTPipeline
lowercase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
lowercase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
        transformer = Transformer2DModel(
            sample_size=16 ,num_layers=2 ,patch_size=4 ,attention_head_dim=8 ,num_attention_heads=2 ,in_channels=4 ,out_channels=8 ,attention_bias=True ,activation_fn='gelu-approximate' ,num_embeds_ada_norm=1000 ,norm_type='ada_norm_zero' ,norm_elementwise_affine=False ,)
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=0 ) -> List[Any]:
'''simple docstring'''
if str(__UpperCamelCase ).startswith('mps' ):
lowercase_ : Any = torch.manual_seed(__UpperCamelCase )
else:
lowercase_ : Optional[int] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
lowercase_ : int = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : str = 'cpu'
lowercase_ : Optional[Any] = self.get_dummy_components()
lowercase_ : List[Any] = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : Tuple = self.get_dummy_inputs(__UpperCamelCase )
lowercase_ : int = pipe(**__UpperCamelCase ).images
lowercase_ : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 16, 16, 3) )
lowercase_ : str = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
lowercase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCamelCase ,1e-3 )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=__UpperCamelCase ,expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = torch.manual_seed(0 )
lowercase_ : Tuple = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
lowercase_ : int = ['vase', 'umbrella', 'white shark', 'white wolf']
lowercase_ : List[str] = pipe.get_label_ids(__UpperCamelCase )
lowercase_ : List[str] = pipe(__UpperCamelCase ,generator=__UpperCamelCase ,num_inference_steps=40 ,output_type='np' ).images
for word, image in zip(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Union[str, Any] = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Union[str, Any] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
lowercase_ : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
lowercase_ : List[Any] = ['vase', 'umbrella']
lowercase_ : Dict = pipe.get_label_ids(__UpperCamelCase )
lowercase_ : Optional[Any] = torch.manual_seed(0 )
lowercase_ : Dict = pipe(__UpperCamelCase ,generator=__UpperCamelCase ,num_inference_steps=25 ,output_type='np' ).images
for word, image in zip(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
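
# Illustrative usage sketch (added as a comment; the model id and label names
# are taken from the slow tests above, the rest is a minimal sketch):
#
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
#   class_ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_ids, num_inference_steps=25, output_type="np").images[0]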
| 425 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE =get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowercase_ , unittest.TestCase ):
lowercase = SpeechTaTokenizer
lowercase = False
lowercase = True
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Optional[Any] = SpeechTaTokenizer(__UpperCamelCase )
lowercase_ : Optional[Any] = AddedToken('<mask>' ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase )
lowercase_ : List[Any] = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = 'this is a test'
lowercase_ : Any = 'this is a test'
return input_text, output_text
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=False ,__UpperCamelCase=20 ,__UpperCamelCase=5 ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ : Optional[Any] = self.get_input_output_texts(__UpperCamelCase )
lowercase_ : Any = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
lowercase_ : Any = tokenizer.decode(__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase )
return text, ids
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[Any] = '<pad>'
lowercase_ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-4] ,'œ' )
self.assertEqual(vocab_keys[-2] ,'<mask>' )
self.assertEqual(vocab_keys[-1] ,'<ctc_blank>' )
self.assertEqual(len(__UpperCamelCase ) ,81 )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_tokenizers(do_lower_case=__UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : List[Any] = len(__UpperCamelCase )
self.assertNotEqual(__UpperCamelCase ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowercase_ : Union[str, Any] = ['aaaaa bbbbbb', 'cccccccccdddddddd']
lowercase_ : Optional[Any] = tokenizer.add_tokens(__UpperCamelCase )
lowercase_ : Union[str, Any] = tokenizer.vocab_size
lowercase_ : Union[str, Any] = len(__UpperCamelCase )
self.assertNotEqual(__UpperCamelCase ,0 )
self.assertEqual(__UpperCamelCase ,__UpperCamelCase )
self.assertEqual(__UpperCamelCase ,len(__UpperCamelCase ) )
self.assertEqual(__UpperCamelCase ,all_size + len(__UpperCamelCase ) )
lowercase_ : Optional[Any] = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' ,add_special_tokens=__UpperCamelCase )
self.assertGreaterEqual(len(__UpperCamelCase ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
lowercase_ : Dict = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
lowercase_ : Any = tokenizer.add_special_tokens(__UpperCamelCase )
lowercase_ : int = tokenizer.vocab_size
lowercase_ : str = len(__UpperCamelCase )
self.assertNotEqual(__UpperCamelCase ,0 )
self.assertEqual(__UpperCamelCase ,__UpperCamelCase )
self.assertEqual(__UpperCamelCase ,len(__UpperCamelCase ) )
self.assertEqual(__UpperCamelCase ,all_size_a + len(__UpperCamelCase ) )
lowercase_ : Optional[int] = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' ,add_special_tokens=__UpperCamelCase )
self.assertGreaterEqual(len(__UpperCamelCase ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : Optional[Any] = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(__UpperCamelCase ,[SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
lowercase_ : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__UpperCamelCase ,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
lowercase_ : str = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
# fmt: off
self.assertListEqual(__UpperCamelCase ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
lowercase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase ,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : str = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
lowercase_ : Union[str, Any] = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase ,model_name='microsoft/speecht5_asr' ,revision='c5ef64c71905caeccde0e4462ef3f9077224c524' ,sequences=__UpperCamelCase ,)
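
# Illustrative usage sketch (added as a comment; the checkpoint name comes
# from the integration test above):
#
#   from transformers import SpeechT5Tokenizer
#
#   tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#   ids = tok("this is a test").input_ids  # character-level SentencePiece ids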
| 425 | 1 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple, computed via the gcd identity."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number evenly divisible by 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
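
# Quick self-checks (added for illustration; 2520 is the well-known answer
# for n = 10, and lcm(4, 6) = 12):
assert lcm(4, 6) == 12
assert solution(10) == 2520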
if __name__ == "__main__":
print(f"""{solution() = }""") | 721 |
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """
    Integrate y' = ode_func(x, y) from x0 to x_end with Heun's method
    (the modified Euler / explicit trapezoidal scheme).
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: one plain Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the step.
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_predict)
        )
        x += step_size
    return y
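
# Illustrative check (added; the test ODE is my choice, not part of the
# original snippet). For y' = -y with y(0) = 1 the exact solution is exp(-x),
# so Heun's method with h = 0.01 should land near exp(-1) at x = 1.
_y = euler_modified(lambda x, y: -y, 1.0, 0.0, 0.01, 1.0)
assert abs(_y[-1] - 0.36787944117144233) < 1e-3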
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 90 | 0 |
"""Project Euler 43: substring divisibility of 0-9 pandigital numbers."""
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Return True if the digit tuple satisfies the substring-divisibility rules."""
    if num[3] % 2 != 0:  # d2 d3 d4 divisible by 2 iff d4 is even
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:  # d3 d4 d5 divisible by 3
        return False
    if num[5] % 5 != 0:  # d4 d5 d6 divisible by 5 iff d6 is 0 or 5
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
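
# Worked example (added): 1406357289 is the canonical number with this
# property, so its digit tuple must pass the check.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))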
if __name__ == "__main__":
    print(f'{solution() = }')
| 546 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_text_model'''
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = vocab_size
__a : Optional[int] = hidden_size
__a : Dict = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : Optional[int] = hidden_act
__a : List[Any] = intermediate_size
__a : List[Any] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : List[str] = type_vocab_size
__a : Tuple = initializer_range
__a : Dict = layer_norm_eps
__a : Any = position_embedding_type
__a : Dict = use_cache
__a : Dict = pad_token_id
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : List[str] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_vision_model'''
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 600 , _UpperCAmelCase = 2.0 , _UpperCAmelCase = 3.1 , _UpperCAmelCase = 8 , _UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , _UpperCAmelCase = [32, 16, 24, 40, 80, 112, 192] , _UpperCAmelCase = [16, 24, 40, 80, 112, 192, 320] , _UpperCAmelCase = [] , _UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , _UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , _UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , _UpperCAmelCase = 0.2_5 , _UpperCAmelCase = "swish" , _UpperCAmelCase = 2560 , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = 0.0_0_1 , _UpperCAmelCase = 0.9_9 , _UpperCAmelCase = 0.2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : Tuple = num_channels
__a : str = image_size
__a : List[Any] = width_coefficient
__a : Optional[int] = depth_coefficient
__a : Union[str, Any] = depth_divisor
__a : int = kernel_sizes
__a : Dict = in_channels
__a : List[str] = out_channels
__a : Any = depthwise_padding
__a : str = strides
__a : Optional[Any] = num_block_repeats
__a : Optional[Any] = expand_ratios
__a : Any = squeeze_expansion_ratio
__a : int = hidden_act
__a : Union[str, Any] = hidden_dim
__a : Union[str, Any] = pooling_type
__a : Tuple = initializer_range
__a : List[str] = batch_norm_eps
__a : List[Any] = batch_norm_momentum
__a : Union[str, Any] = drop_connect_rate
__a : List[Any] = sum(_UpperCAmelCase ) * 4
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : Optional[Any] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align'''
__lowerCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=640 , _UpperCAmelCase=1.0 , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
if text_config is None:
__a : Dict = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
__a : Any = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
__a : Any = AlignTextConfig(**_UpperCAmelCase )
__a : Any = AlignVisionConfig(**_UpperCAmelCase )
__a : Optional[int] = projection_dim
__a : Union[str, Any] = temperature_init_value
__a : int = initializer_range
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = copy.deepcopy(self.__dict__ )
__a : Tuple = self.text_config.to_dict()
__a : Union[str, Any] = self.vision_config.to_dict()
__a : int = self.__class__.model_type
        return output
| 52 | 0 |
"""simple docstring"""
def A( snake_case_ ):
"""simple docstring"""
assert (
isinstance(snake_case_ , snake_case_ ) and number_of_steps > 0
), F"""number_of_steps needs to be positive integer, your input {number_of_steps}"""
if number_of_steps == 1:
return 1
lowercase__: Union[str, Any] = 1, 1
for _ in range(number_of_steps - 1 ):
lowercase__: List[str] = current + previous, current
return current
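
# Added spot check (not in the original snippet): the sequence continues
# 1, 2, 3, 5, 8, ... so five steps can be climbed eight ways.
assert climb_stairs(5) == 8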
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 717 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
UpperCamelCase = logging.get_logger(__name__)
@dataclass
class _a :
'''simple docstring'''
def __init__( self , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=6.0 , UpperCAmelCase_=None , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=None , UpperCAmelCase_="fp4" , UpperCAmelCase_=False , **UpperCAmelCase_ , ) -> List[str]:
'''simple docstring'''
lowercase__: Any = load_in_abit
lowercase__: Any = load_in_abit
lowercase__: Dict = llm_inta_threshold
lowercase__: Optional[Any] = llm_inta_skip_modules
lowercase__: Optional[int] = llm_inta_enable_fpaa_cpu_offload
lowercase__: Dict = llm_inta_has_fpaa_weight
lowercase__: List[str] = bnb_abit_quant_type
lowercase__: int = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__: Dict = torch.floataa
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowercase__: Union[str, Any] = getattr(UpperCAmelCase_ , UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , torch.dtype):
lowercase__: int = bnb_abit_compute_dtype
else:
raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
self.post_init()
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
if not isinstance(self.llm_inta_threshold , UpperCAmelCase_):
raise ValueError("llm_int8_threshold must be a float")
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , UpperCAmelCase_):
raise ValueError("llm_int8_skip_modules must be a list of strings")
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , UpperCAmelCase_):
raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
if not isinstance(self.llm_inta_has_fpaa_weight , UpperCAmelCase_):
raise ValueError("llm_int8_has_fp16_weight must be a boolean")
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype):
raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
if not isinstance(self.bnb_abit_quant_type , UpperCAmelCase_):
raise ValueError("bnb_4bit_quant_type must be a string")
if not isinstance(self.bnb_abit_use_double_quant , UpperCAmelCase_):
raise ValueError("bnb_4bit_use_double_quant must be a boolean")
if self.load_in_abit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
"0.39.0"):
raise ValueError(
"4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version")
def __lowercase ( self) -> Tuple:
'''simple docstring'''
return self.load_in_abit or self.load_in_abit
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def __lowercase ( cls , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_) -> List[Any]:
'''simple docstring'''
lowercase__: Dict = cls(**UpperCAmelCase_)
lowercase__: Optional[int] = []
for key, value in kwargs.items():
if hasattr(UpperCAmelCase_ , UpperCAmelCase_):
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
to_remove.append(UpperCAmelCase_)
for key in to_remove:
kwargs.pop(UpperCAmelCase_ , UpperCAmelCase_)
if return_unused_kwargs:
return config, kwargs
else:
return config
def __lowercase ( self , UpperCAmelCase_) -> Any:
'''simple docstring'''
with open(UpperCAmelCase_ , "w" , encoding="utf-8") as writer:
lowercase__: Dict = self.to_dict()
lowercase__: List[Any] = json.dumps(UpperCAmelCase_ , indent=2 , sort_keys=UpperCAmelCase_) + "\n"
writer.write(UpperCAmelCase_)
def __lowercase ( self) -> Dict[str, Any]:
'''simple docstring'''
lowercase__: List[Any] = copy.deepcopy(self.__dict__)
lowercase__: Any = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
return output
def __repr__( self) -> str:
'''simple docstring'''
return F"""{self.__class__.__name__} {self.to_json_string()}"""
def __lowercase ( self , UpperCAmelCase_ = True) -> str:
'''simple docstring'''
if use_diff is True:
lowercase__: Tuple = self.to_diff_dict()
else:
lowercase__: List[str] = self.to_dict()
return json.dumps(UpperCAmelCase_ , indent=2 , sort_keys=UpperCAmelCase_) + "\n"
def __lowercase ( self) -> Dict[str, Any]:
'''simple docstring'''
lowercase__: Tuple = self.to_dict()
# get the default config dict
lowercase__: int = BitsAndBytesConfig().to_dict()
lowercase__: Optional[int] = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__: Any = value
return serializable_config_dict
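
# Illustrative usage sketch (added as a comment; it assumes the real
# BitsAndBytesConfig API in `transformers`, of which the class above is an
# obfuscated copy): load a model in 4-bit NF4 with bfloat16 compute.
#
#   import torch
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   cfg = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_compute_dtype=torch.bfloat16,
#       bnb_4bit_use_double_quant=True,
#   )
#   model = AutoModelForCausalLM.from_pretrained("gpt2", quantization_config=cfg)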
| 120 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[np.ndarray, list, str]:
    # The four sampled images are pasted into the four quadrants of the canvas;
    # the quadrant split point is drawn at random within scale_range.
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    main()
    print('DONE ✅')
| 20 |
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main() -> None:
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')
    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)
    print(f"""\n{mode.title()}ed message:""")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            # Only advance the key when a letter was actually translated.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
    main()
| 20 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ ( a_ ):
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=5_12 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ) -> str:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_lengths
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = gelu_activation
UpperCAmelCase = sinusoidal_embeddings
UpperCAmelCase = causal
UpperCAmelCase = asm
UpperCAmelCase = n_langs
UpperCAmelCase = vocab_size
UpperCAmelCase = n_special
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = summary_type
UpperCAmelCase = use_proj
UpperCAmelCase = scope
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_input_lengths:
UpperCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Any:
"""simple docstring"""
UpperCAmelCase = FlaubertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
UpperCAmelCase = model(snake_case__ , langs=snake_case__ )
UpperCAmelCase = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = FlaubertWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Dict:
"""simple docstring"""
UpperCAmelCase = FlaubertForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(snake_case__ )
UpperCAmelCase = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> str:
"""simple docstring"""
UpperCAmelCase = FlaubertForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(snake_case__ )
UpperCAmelCase = model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
UpperCAmelCase = model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
((UpperCAmelCase ) , ) = result_with_labels.to_tuple()
UpperCAmelCase = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
((UpperCAmelCase ) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Any:
"""simple docstring"""
UpperCAmelCase = FlaubertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(snake_case__ )
UpperCAmelCase = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = FlaubertForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.num_choices
UpperCAmelCase = FlaubertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCamelCase_ ( a_ , a_ , unittest.TestCase ):
_A : Optional[int] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_A : Union[str, Any] = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ) -> Dict:
"""simple docstring"""
UpperCAmelCase = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = FlaubertModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ )
@slow
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = FlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
UpperCAmelCase = True
UpperCAmelCase = model_class(config=snake_case__ )
UpperCAmelCase = self._prepare_for_class(snake_case__ , snake_case__ )
UpperCAmelCase = torch.jit.trace(
snake_case__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ , os.path.join(snake_case__ , """traced_model.pt""" ) )
UpperCAmelCase = torch.jit.load(os.path.join(snake_case__ , """traced_model.pt""" ) , map_location=snake_case__ )
loaded(inputs_dict["""input_ids"""].to(snake_case__ ) , inputs_dict["""attention_mask"""].to(snake_case__ ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
UpperCAmelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
with torch.no_grad():
UpperCAmelCase = model(snake_case__ )[0]
UpperCAmelCase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , snake_case__ )
UpperCAmelCase = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
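
# Illustrative usage sketch (added as a comment; checkpoint and hidden size
# come from the integration test above):
#
#   import torch
#   from transformers import FlaubertModel, FlaubertTokenizer
#
#   tok = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
#   model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
#   with torch.no_grad():
#       out = model(**tok("Bonjour le monde !", return_tensors="pt"))
#   print(out.last_hidden_state.shape)  # (1, seq_len, 768)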
| 701 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : List[Any] = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class UpperCamelCase_ ( a_ ):
_A : int = 'xlm-roberta-xl'
def __init__( self , snake_case__=25_08_80 , snake_case__=25_60 , snake_case__=36 , snake_case__=32 , snake_case__=1_02_40 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=5_14 , snake_case__=1 , snake_case__=0.02 , snake_case__=1e-05 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = use_cache
UpperCAmelCase = classifier_dropout
class UpperCamelCase_ ( a_ ):
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
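
# Illustrative usage sketch (added as a comment; the un-mangled class in
# `transformers` is XLMRobertaXLConfig, which the snippet above copies):
#
#   from transformers import XLMRobertaXLConfig
#
#   config = XLMRobertaXLConfig()
#   assert config.model_type == "xlm-roberta-xl"
#   assert config.hidden_size == 2560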
| 378 | 0 |
"""Project Euler 35: count the circular primes below one million."""
from __future__ import annotations

sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())
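
# Spot check (added): there are exactly 13 circular primes below 100:
# 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79 and 97.
assert len(find_circular_primes(100)) == 13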
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""")
| 692 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        # Padding positions must keep `padding_idx`; real tokens count up from it.
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        # Without input_ids, positions are simply sequential after `padding_idx`.
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
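# For reference, the position-id behaviour the embedding tests above pin down
# follows the RoBERTa convention. A minimal sketch (not the library source
# verbatim): non-padding tokens count up from padding_idx + 1, while padding
# tokens keep padding_idx itself.
#
#   def create_position_ids(input_ids, padding_idx):
#       mask = input_ids.ne(padding_idx).int()
#       incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
#       return incremental_indices.long() + padding_idx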
| 124 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
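# With the lazy module installed in sys.modules, importing the package stays
# cheap: a submodule is only imported when one of its attributes is first
# accessed. Illustrative consumer side:
#
#   from transformers.models import whisper
#   model_cls = whisper.WhisperModel  # modeling_whisper is imported here, not at package import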
| 711 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
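# Minimal usage sketch (the checkpoint name is illustrative):
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   # inputs now carries input_ids, attention_mask and pixel_values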
| 253 | 0 |
"""Callbacks for RAG fine-tuning: parameter counting, checkpointing, early stopping and metric logging."""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
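# Sketch of how these callbacks are typically wired together (argument values
# are illustrative, not taken from the original training script):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir="outputs", metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ],
#   )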
| 107 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output LayerNorm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # NOTE: duplicate of the `_output_layer_norm` branch above; kept
                # as in the original script, where this branch is unreachable.
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model


def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
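# Example invocation (paths illustrative; assumes this script is saved as
# convert_bert_original_tf2_checkpoint_to_pytorch.py):
#
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_model/bert_model.ckpt \
#       --bert_config_file ./tf2_model/bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin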
| 225 | 0 |
"""Pix2Struct model configuration."""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
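# Quick sketch of composing the config from sub-configs (values illustrative):
#
#   text_cfg = Pix2StructTextConfig(num_layers=2, num_heads=2)
#   vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2)
#   cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   # initializer_range is propagated to both sub-configs:
#   assert cfg.text_config.initializer_range == cfg.initializer_range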
| 720 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
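# Usage sketch (the checkpoint name is illustrative):
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   classifier("cat.png", top_k=3)
#   # -> [{"score": ..., "label": ...}, ...]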
| 15 | 0 |
"""Brute-force a Caesar cipher by printing the decryption under every possible key."""
import string


def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
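# Example: decrypt("KHOOR") prints one candidate per key; the line for key 3
# reads "Decryption using Key #3: HELLO".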
| 126 |
"""A minimal k-nearest-neighbours classifier demonstrated on the iris dataset."""
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
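# The query point [4.4, 3.1, 1.3, 1.4] sits deep in the setosa cluster (short
# petal), so this should print "setosa" on essentially every random split.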
| 126 | 1 |
"""Check that the model section of the doc table of contents is duplicate-free and sorted."""
import argparse
from collections import defaultdict
import yaml
UpperCamelCase_ = "docs/source/en/_toctree.yml"
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = defaultdict(__UpperCamelCase )
for doc in model_doc:
counts[doc["local"]] += 1
SCREAMING_SNAKE_CASE : int = [key for key, value in counts.items() if value > 1]
SCREAMING_SNAKE_CASE : Any = []
for duplicate_key in duplicates:
SCREAMING_SNAKE_CASE : List[Any] = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__UpperCamelCase ) > 1:
raise ValueError(
f"{duplicate_key} is present several times in the documentation table of content at "
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(__UpperCamelCase ,key=lambda __UpperCamelCase : s["title"].lower() )
def lowercase__( __UpperCamelCase: List[Any]=False ):
"""simple docstring"""
with open(__UpperCamelCase ,encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE : Union[str, Any] = yaml.safe_load(f.read() )
# Get to the API doc
SCREAMING_SNAKE_CASE : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
SCREAMING_SNAKE_CASE : Dict = content[api_idx]['sections']
# Then to the model doc
SCREAMING_SNAKE_CASE : List[Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
SCREAMING_SNAKE_CASE : Optional[int] = api_doc[model_idx]['sections']
SCREAMING_SNAKE_CASE : List[Any] = [(idx, section) for idx, section in enumerate(__UpperCamelCase ) if 'sections' in section]
SCREAMING_SNAKE_CASE : Any = False
for idx, modality_doc in modalities_docs:
SCREAMING_SNAKE_CASE : Union[str, Any] = modality_doc['sections']
SCREAMING_SNAKE_CASE : Dict = clean_model_doc_toc(__UpperCamelCase )
if old_modality_doc != new_modality_doc:
SCREAMING_SNAKE_CASE : List[Any] = True
if overwrite:
SCREAMING_SNAKE_CASE : Union[str, Any] = new_modality_doc
if diff:
if overwrite:
SCREAMING_SNAKE_CASE : List[Any] = model_doc
SCREAMING_SNAKE_CASE : Dict = api_doc
with open(__UpperCamelCase ,'w' ,encoding='utf-8' ) as f:
f.write(yaml.dump(__UpperCamelCase ,allow_unicode=__UpperCamelCase ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCamelCase_ = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
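# Illustration of what clean_model_doc_toc does:
#
#   clean_model_doc_toc([
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#   ])
#   # -> [{"local": "model_doc/albert", "title": "ALBERT"},
#   #     {"local": "model_doc/bert", "title": "BERT"}]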
| 703 |
"""Swap two nodes of a singly linked list by exchanging their data fields."""
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
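# Expected output:
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5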
| 508 | 0 |
"""Tokenizer class for RAG: wraps a question-encoder tokenizer and a generator tokenizer."""
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
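# Usage sketch (the checkpoint name is a published RAG checkpoint, shown for
# illustration):
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   batch = tokenizer(["who wrote hamlet?"], return_tensors="pt")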
| 56 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})

    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
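# Usage sketch: the template maps a custom text column onto the canonical
# "text" feature (the column name is illustrative):
#
#   template = LanguageModeling(text_column="content")
#   template.column_mapping  # {"content": "text"}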
| 198 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class snake_case_ :
"""simple docstring"""
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=[1, 1, 2] , __a=1 , __a=32 , __a=4 , __a=8 , __a=37 , __a="gelu_new" , __a=0.1 , __a=0.1 , __a=0.0 , __a=512 , __a=3 , __a=0.02 , __a=3 , __a=4 , __a=None , __a=False , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = block_sizes
A__ = num_decoder_layers
A__ = d_model
A__ = n_head
A__ = d_head
A__ = d_inner
A__ = hidden_act
A__ = hidden_dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = 2
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = initializer_std
# Used in the tests to check the size of the first attention layer
A__ = n_head
# Used in the tests to check the size of the first hidden state
A__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
A__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
A__ = self.num_hidden_layers + 2
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def _UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , ):
"""simple docstring"""
A__ = TFFunnelModel(config=__a )
A__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A__ = model(__a )
A__ = [input_ids, input_mask]
A__ = model(__a )
A__ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
A__ = False
A__ = TFFunnelModel(config=__a )
A__ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
A__ = False
A__ = TFFunnelModel(config=__a )
A__ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class snake_case_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class snake_case_ ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 554 |
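The two test classes above follow the standard transformers tester/mixin split: a plain helper object builds a config plus dummy inputs, and each test_* method forwards them to a create_and_check_* helper. A minimal, self-contained sketch of that pattern, with hypothetical names (not the real Funnel classes):

import unittest

class ToyModelTester:
    # Hypothetical stand-in for TFFunnelModelTester: builds dummy config/inputs
    # and asserts through its parent test case.
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 2

    def prepare_config_and_inputs(self):
        config = {"d_model": 4}
        input_ids = [[0, 1, 2], [2, 1, 0]]  # dummy token ids
        return config, input_ids

    def create_and_check_model(self, config, input_ids):
        # A real tester would instantiate the model and check output shapes here.
        self.parent.assertEqual(len(input_ids), self.batch_size)

class ToyModelTest(unittest.TestCase):
    def test_model(self):
        tester = ToyModelTester(self)
        tester.create_and_check_model(*tester.prepare_config_and_inputs())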
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"
    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 554 | 1 |
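In use, the processor fans each modality out to its sub-processor and merges the resulting dicts. A hedged sketch of calling it — the checkpoint id and array shapes here are illustrative and may need adjusting to the real TvltImageProcessor/TvltFeatureExtractor expectations:

import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
video = list(np.random.rand(8, 224, 224, 3))   # 8 dummy RGB frames
speech = np.random.rand(16_000)                # ~1 s of dummy audio
batch = processor(images=[video], audio=[speech], sampling_rate=16_000)
print(sorted(batch.keys()))  # union of image-processor and feature-extractor outputs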
def SCREAMING_SNAKE_CASE__ ( num : int ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220 |
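The helper above reverses the decimal digits of num and reports whether the reversal equals the original, i.e. whether the number is a decimal palindrome. A quick illustration, calling the (obfuscated) name defined above:

print(SCREAMING_SNAKE_CASE__(121))   # True: digits read the same both ways
print(SCREAMING_SNAKE_CASE__(123))   # False: the reversal is 321
print(SCREAMING_SNAKE_CASE__(-121))  # False: negatives are rejected up front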
import math
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        # When the lexicon size crosses a power of two, codes grow by one bit,
        # so re-key every entry with a leading zero.
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    # Despite the name (kept because it is called below), this driver decompresses.
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 239 | 0 |
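write_file_binary pads the final chunk with a 1 followed by zeros (or appends a whole "10000000" byte when the bit string already ends on a byte boundary), so the payload length is recoverable. A small worked example of that framing; the inverse scan from the right lives in the companion compression script, not this file:

bits = "10110"                 # 5 payload bits
padded = bits + "1" + "0" * 2  # -> "10110100", exactly one byte
unpadded = padded[: padded.rindex("1")]  # drop the marker 1 and trailing zeros
assert unpadded == bits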
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_( a__ , unittest.TestCase ):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 710 |
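PhoBERT's BPE marks non-final subword pieces with a trailing "@@" rather than marking word starts. A toy illustration of how such pieces are re-joined into words — the helper below is hypothetical, not part of the tokenizer API above:

def detokenize_bpe(pieces):
    # Join pieces, gluing any piece that ends with the "@@" continuation marker.
    text = " ".join(pieces)
    return text.replace("@@ ", "")

print(detokenize_bpe(["T@@", "ô@@", "i", "l@@", "à"]))  # "Tôi là"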
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case_( a__ ):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["text"]
    outputs = ["text"]
    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 637 | 0 |
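A PipelineTool like this is normally used as a callable: encode tokenizes, forward generates, decode turns ids back into text. A hedged usage sketch — the import path assumes the transformers tools module where this class lives, and the checkpoint downloads on first call:

from transformers.tools import TextSummarizationTool

summarizer = TextSummarizationTool()
long_text = "Accelerate lets the same PyTorch training script run on CPU, GPU, or TPU. " * 10
print(summarizer(long_text))  # a short English summary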
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run_name = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run_name, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 459 |
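The gradient-accumulation clamp in the script above keeps the per-step batch at MAX_GPU_BATCH_SIZE while preserving the effective batch size. A quick check of the arithmetic:

MAX_GPU_BATCH_SIZE = 16
requested = 64
gradient_accumulation_steps = requested // MAX_GPU_BATCH_SIZE  # 4 small steps per optimizer step
assert gradient_accumulation_steps * MAX_GPU_BATCH_SIZE == requested  # effective batch unchanged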
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
__magic_name__ : int = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
__magic_name__ : Optional[Any] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 615 | 0 |
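Run as a script, the converter takes the flags declared in the argparse block above; for example (script name assumed from the transformers conversion-script convention, paths are placeholders):

python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
    --pytorch_dump_folder_path ./transfo-xl-pt \
    --tf_checkpoint_path ./tf_ckpt/model.ckpt \
    --transfo_xl_config_file ./tf_ckpt/config.json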
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 412 |
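A word's value is the sum of its letter positions (A = 1, obtained as ord(letter) - 64 for uppercase ASCII), and the solution counts words whose value is triangular. Checking one word by hand:

word = "SKY"
value = sum(ord(ch) - 64 for ch in word)  # 19 + 11 + 25
print(value)                # 55
print(value == 10 * 11 // 2)  # True: 55 is the 10th triangular number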
import math
def real_power(apparent_power: float, power_factor: float) -> float:
'''simple docstring'''
if (
not isinstance(lowercase__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
'''simple docstring'''
if (
not isinstance(lowercase__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 412 | 1 |
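With apparent power S and power factor cos(phi), real power is P = S * cos(phi) and reactive power is Q = S * sqrt(1 - cos(phi)**2). For S = 100 VA at a 0.8 power factor:

print(real_power(100, 0.8))      # 80.0 W
print(reactive_power(100, 0.8))  # ~60.0 VAR, since sqrt(1 - 0.64) = 0.6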
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""bigcode/gpt_bigcode-santacoder""": """https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json""",
}
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 182 |
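Instantiating the config gives the usual PretrainedConfig behavior; the multi_query flag is what distinguishes GPT-BigCode's attention from vanilla GPT-2. A quick sketch using the real transformers class name rather than the obfuscated one above:

from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)  # tiny config, for illustration only
print(config.multi_query)  # True by default: keys/values are shared across heads
print(config.hidden_size)  # 128 — aliased to n_embd through attribute_map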
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_A = logging.get_logger(__name__)
_A = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def _a (self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase__ : Dict = {0: """batch"""}
UpperCAmelCase__ : Tuple = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
UpperCAmelCase__ : Dict = {0: """batch""", 1: """decoder_sequence"""}
UpperCAmelCase__ : List[str] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase__ : str = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.num_layers
for i in range(_lowerCamelCase ):
UpperCAmelCase__ : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase__ : int = {0: """batch""", 2: """past_sequence + sequence"""}
else:
UpperCAmelCase__ : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def _a (self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : Any = super().outputs
else:
UpperCAmelCase__ : Union[str, Any] = super(_lowerCamelCase , self ).outputs
if self.use_past:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.num_layers
for i in range(_lowerCamelCase ):
UpperCAmelCase__ : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase__ : int = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _a (self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
"""simple docstring"""
UpperCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Generate decoder inputs
UpperCAmelCase__ : int = seq_length if not self.use_past else 1
UpperCAmelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : str = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase__ : Union[str, Any] = dict(**_lowerCamelCase , **_lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ : str = common_inputs["""input_ids"""].shape
UpperCAmelCase__ : int = common_inputs["""decoder_input_ids"""].shape[1]
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.num_attention_heads
UpperCAmelCase__ : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ : Optional[int] = decoder_seq_length + 3
UpperCAmelCase__ : Any = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase__ : str = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 )
UpperCAmelCase__ : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.num_layers
UpperCAmelCase__ : Union[str, Any] = min(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : str = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers
UpperCAmelCase__ : Dict = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
) )
# TODO: test this.
UpperCAmelCase__ : Dict = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_lowerCamelCase , _lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) )
return common_inputs
def _a (self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
"""simple docstring"""
UpperCAmelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
UpperCAmelCase__ : int = seqlen + 2
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.num_layers
UpperCAmelCase__ , UpperCAmelCase__ : str = self.num_attention_heads
UpperCAmelCase__ : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ : Optional[int] = common_inputs["""attention_mask"""].dtype
UpperCAmelCase__ : List[str] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 )
UpperCAmelCase__ : Tuple = [
(torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase )
]
return common_inputs
def _a (self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase__ : Optional[int] = tokenizer.num_special_tokens_to_add(_lowerCamelCase )
UpperCAmelCase__ : int = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase__ : List[Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase__ : Optional[int] = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return common_inputs
def _a (self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
elif self.task == "causal-lm":
UpperCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
else:
UpperCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
return common_inputs
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : Union[str, Any] = super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
UpperCAmelCase__ : Optional[int] = super(_lowerCamelCase , self )._flatten_past_key_values_(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
| 182 | 1 |
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Reduce the angle to the range (-360, 360) degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 637 |
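This is the Maclaurin series sin(x) = x - x**3/3! + x**5/5! - ..., applied after reducing the angle and converting to radians. A quick sanity check of the values:

print(sin(30))   # 0.5
print(sin(90))   # 1.0
print(sin(180))  # ~0.0 up to rounding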
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : int = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class snake_case_( a__ ):
    model_type = "levit"
    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class snake_case_( a__ ):
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation(self):
return 1E-4
| 637 | 1 |
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for pass_idx in range(arr_size):
        for i in range(pass_idx % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
A__ : Optional[int] = list(range(1_0, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 353 |
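Odd-even transposition (brick) sort alternates compare-and-swap sweeps at even and odd offsets; after at most n passes over n elements the list is sorted, and every comparison within a pass is independent, which is what makes the algorithm attractive for parallel hardware. A quick check:

assert odd_even_transposition([3, 1, 2]) == [1, 2, 3]
assert odd_even_transposition(list(range(10, 0, -1))) == list(range(1, 11))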
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger()
@dataclass
class __lowerCamelCase :
'''simple docstring'''
snake_case__ : nn.Module
snake_case__ : List[nn.Module] = field(default_factory=__SCREAMING_SNAKE_CASE )
snake_case__ : list = field(default_factory=__SCREAMING_SNAKE_CASE )
    def _forward_hook(self, m, inputs, outputs):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self
return self
@property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class __lowerCamelCase :
'''simple docstring'''
snake_case__ : nn.Module
snake_case__ : nn.Module
snake_case__ : int = 1
snake_case__ : List = field(default_factory=__SCREAMING_SNAKE_CASE )
snake_case__ : List = field(default_factory=__SCREAMING_SNAKE_CASE )
snake_case__ : bool = True
def __call__( self , a__ ):
__SCREAMING_SNAKE_CASE : List[Any] = Tracker(self.dest )(a__ ).parametrized
__SCREAMING_SNAKE_CASE : str = Tracker(self.src )(a__ ).parametrized
__SCREAMING_SNAKE_CASE : Tuple = list(filter(lambda a__ : type(a__ ) not in self.src_skip , a__ ) )
__SCREAMING_SNAKE_CASE : List[Any] = list(filter(lambda a__ : type(a__ ) not in self.dest_skip , a__ ) )
if len(a__ ) != len(a__ ) and self.raise_if_mismatch:
raise Exception(
f'Numbers of operations are different. Source module has {len(a__ )} operations while'
f' destination module has {len(a__ )}.' )
for dest_m, src_m in zip(a__ , a__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'Transfered from={src_m} to={dest_m}' )
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , a__ ):
super().__init__()
__SCREAMING_SNAKE_CASE : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), f'Unexpected layer name {k}'
__SCREAMING_SNAKE_CASE : str = len(a__ ) + 1
feature_blocks.append((f'res{block_index}', v) )
__SCREAMING_SNAKE_CASE : Tuple = nn.ModuleDict(a__ )
def a_ ( self , a__ ):
return get_trunk_forward_outputs(
a__ , out_feat_keys=a__ , feature_blocks=self._feature_blocks , )
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def a_ ( self , a__ ):
__SCREAMING_SNAKE_CASE : List[str] = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self , a__ ):
# default to timm!
if x not in self:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.convert_name_to_timm(a__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = partial(lambda: (timm.create_model(a__ , pretrained=a__ ).eval(), None) )
else:
__SCREAMING_SNAKE_CASE : List[Any] = super().__getitem__(a__ )
return val
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __getitem__( self , a__ ):
if "seer" in x and "in1k" not in x:
__SCREAMING_SNAKE_CASE : Any = RegNetModel
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = RegNetForImageClassification
return val
def __A ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Tuple[str, str]] ):
"""simple docstring"""
for from_key, to_key in keys:
__SCREAMING_SNAKE_CASE : Optional[int] = from_state_dict[from_key].clone()
print(f'Copied key={from_key} to={to_key}' )
return to_state_dict
def __A ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Callable[[], nn.Module] , _SCREAMING_SNAKE_CASE : Callable[[], nn.Module] , _SCREAMING_SNAKE_CASE : RegNetConfig , _SCREAMING_SNAKE_CASE : Path , _SCREAMING_SNAKE_CASE : bool = True , ):
"""simple docstring"""
print(f'Converting {name}...' )
with torch.no_grad():
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = from_model_func()
__SCREAMING_SNAKE_CASE : Any = our_model_func(_SCREAMING_SNAKE_CASE ).eval()
__SCREAMING_SNAKE_CASE : Any = ModuleTransfer(src=_SCREAMING_SNAKE_CASE , dest=_SCREAMING_SNAKE_CASE , raise_if_mismatch=_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : int = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(_SCREAMING_SNAKE_CASE )
if from_state_dict is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
__SCREAMING_SNAKE_CASE : Tuple = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
__SCREAMING_SNAKE_CASE : str = manually_copy_vissl_head(_SCREAMING_SNAKE_CASE , our_model.state_dict() , _SCREAMING_SNAKE_CASE )
our_model.load_state_dict(_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Optional[int] = our_model(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[str] = (
our_outputs.logits if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else our_outputs.last_hidden_state
)
__SCREAMING_SNAKE_CASE : Dict = from_model(_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Tuple = from_output[-1] if type(_SCREAMING_SNAKE_CASE ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
__SCREAMING_SNAKE_CASE : str = our_outputs.hidden_states[-1]
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=_SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE : Optional[int] = 2_2_4 if "seer" not in name else 3_8_4
# we can use the convnext one
__SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=_SCREAMING_SNAKE_CASE , )
print(f'Pushed {name}' )
def __A ( _SCREAMING_SNAKE_CASE : Path , _SCREAMING_SNAKE_CASE : str = None , _SCREAMING_SNAKE_CASE : bool = True ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = "imagenet-1k-id2label.json"
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1_0_0_0
__SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels)
__SCREAMING_SNAKE_CASE : Tuple = "huggingface/label-files"
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : List[str] = json.load(open(cached_download(hf_hub_url(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) ) , "r" ) )
__SCREAMING_SNAKE_CASE : Any = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Optional[int] = idalabel
__SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Union[str, Any] = partial(_SCREAMING_SNAKE_CASE , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid=_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : str = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
__SCREAMING_SNAKE_CASE : Optional[int] = NameToOurModelFuncMap()
__SCREAMING_SNAKE_CASE : List[Any] = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , model_dir=str(_SCREAMING_SNAKE_CASE ) , map_location="cpu" )
__SCREAMING_SNAKE_CASE : Dict = model_func()
# check if we have a head, if yes add it
__SCREAMING_SNAKE_CASE : Optional[Any] = files["classy_state_dict"]["base_model"]["model"]
__SCREAMING_SNAKE_CASE : int = model_state_dict["trunk"]
model.load_state_dict(_SCREAMING_SNAKE_CASE )
return model.eval(), model_state_dict["heads"]
# pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))
        ),
    )
# IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowercase = parser.parse_args()
lowercase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
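# Illustrative aside (not from the original script): the from/ours model maps above
# register functools.partial objects so that no checkpoint is downloaded and no model
# is built until a specific name is requested. A minimal, self-contained sketch of
# that lazy-registry pattern, with made-up loader and registry names:
from functools import partial


def _load_demo(url, model_factory):
    # the real loader would download `url` first; this only shows the deferred call shape
    return url, model_factory()


_demo_registry = {"demo-model": partial(_load_demo, "https://example.com/demo.torch", lambda: object())}
_url, _model = _demo_registry["demo-model"]()  # work happens only at this call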
| 211 | 0 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
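# Usage sketch (not part of the test suite): build the same toy BPE tokenizer the
# setUp above writes to disk, then tokenize. The temp directory is hypothetical.
if __name__ == "__main__":
    import tempfile

    tmpdir = tempfile.mkdtemp()
    vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
    merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
    vocab_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"])
    merges_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["merges_file"])
    with open(vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(dict(zip(vocab, range(len(vocab))))) + "\n")
    with open(merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(merges))
    tok = BlenderbotSmallTokenizer(
        vocab_file, merges_file, unk_token="__unk__", bos_token="__start__", eos_token="__end__"
    )
    print(tok.tokenize("adapt act apte"))  # expected: ['adapt', 'act', 'ap@@', 'te']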
| 716 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
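# Aside (not from the original file): integration tests like the one above pin a
# tiny pixel window instead of the full 512x512 output. A minimal sketch of that
# slice-comparison pattern on synthetic data:
def _slice_check_demo():
    rng = np.random.RandomState(0)
    image = rng.rand(1, 512, 512, 3).astype(np.float32)
    image_slice = image[0, 253:256, 253:256, -1]
    expected_slice = image_slice.copy()  # a real test hard-codes these 9 values
    assert np.abs(image_slice.flatten() - expected_slice.flatten()).max() < 1e-2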
| 643 | 0 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> negative_image_emb = out.negative_image_embeds\n\n    >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n    >>> pipe.to("cuda")\n\n    >>> image = pipe(\n    ...     prompt,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=negative_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ... ).images\n\n    >>> image[0].save("cat.png")\n    ```\n'
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
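# Worked example (added for clarity): with scale_factor=8 the divisor is
# 8**2 == 64, and any remainder rounds the latent size up before re-scaling.
assert get_new_h_w(768, 768) == (96, 96)  # 768 // 64 == 12 exactly -> 12 * 8
assert get_new_h_w(500, 500) == (64, 64)  # 500 // 64 == 7, rounded up to 8 -> 8 * 8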
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
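# Aside (not part of the pipeline): the guidance update inside the denoising loop
# above is plain classifier-free guidance. A standalone sketch with dummy tensors:
def _cfg_demo(guidance_scale: float = 4.0) -> torch.Tensor:
    noise_pred_uncond = torch.zeros(1, 4, 96, 96)
    noise_pred_text = torch.ones(1, 4, 96, 96)
    # move the prediction from the unconditional output toward the text-conditioned one
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)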
| 524 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 524 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
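# Usage sketch (added): Doolittle decomposition without pivoting; verify L @ U
# reconstructs the input (the matrix below is chosen so no pivot is zero).
_demo = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
_lower, _upper = lower_upper_decomposition(_demo)
assert np.allclose(_lower @ _upper, _demo)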
| 708 |
"""simple docstring"""
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
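# Worked example (added): the first Fibonacci number with 3 digits is 144,
# which this indexing reaches at position 12, so solution(3) == 12.
assert fibonacci(12) == 144
assert solution(3) == 12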
| 600 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| 57 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
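# Usage sketch (added; values illustrative): instantiate the config with a wider
# visual embedding and read it back.
if __name__ == "__main__":
    config = VisualBertConfig(visual_embedding_dim=1024)
    print(config.model_type, config.visual_embedding_dim)  # visual_bert 1024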
| 685 | 0 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
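# Worked examples (added): an even-length merge averages the two middle values.
assert median_of_two_arrays([1.0, 3.0], [2.0, 4.0]) == 2.5
assert median_of_two_arrays([1.0], [2.0, 3.0]) == 2.0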
| 37 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
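# Usage sketch (added): driving the generator for a single pair outside the repo
# layout above; the output directory below is a throwaway temp path.
import tempfile

demo_dir = Path(tempfile.mkdtemp()) / "facebook" / "wmt19-en-de"
write_model_card(demo_dir, src_lang="en", tgt_lang="de")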
| 37 | 1 |