code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | """simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowercase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return image
elif isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
a__: Optional[int] = [image]
a__: str = [trans(img.convert('RGB' ) ) for img in image]
a__: Any = torch.stack(_SCREAMING_SNAKE_CASE )
return image
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
a__: Dict = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=lowercase , scheduler=lowercase)
def lowerCamelCase_ ( self , lowercase) -> int:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}')
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: int = min(int(num_inference_steps * strength) , lowercase)
a__: Any = max(num_inference_steps - init_timestep , 0)
a__: Union[str, Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None) -> List[Any]:
'''simple docstring'''
if not isinstance(lowercase , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase)}')
a__: Tuple = image.to(device=lowercase , dtype=lowercase)
if isinstance(lowercase , lowercase) and len(lowercase) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(lowercase)}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
a__: List[str] = init_latents.shape
a__: List[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase)
# get latents
print('add noise to latents at timestep' , lowercase)
a__: int = self.scheduler.add_noise(lowercase , lowercase , lowercase)
a__: Dict = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase = None , lowercase = 0.8 , lowercase = 1 , lowercase = None , lowercase = 0.0 , lowercase = 50 , lowercase = None , lowercase = "pil" , lowercase = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowercase)
# 2. Preprocess image
a__: Tuple = preprocess(lowercase)
# 3. set timesteps
self.scheduler.set_timesteps(lowercase , device=self.device)
a__ , a__: Union[str, Any] = self.get_timesteps(lowercase , lowercase , self.device)
a__: Optional[int] = timesteps[:1].repeat(lowercase)
# 4. Prepare latent variables
a__: Union[str, Any] = self.prepare_latents(lowercase , lowercase , lowercase , self.unet.dtype , self.device , lowercase)
a__: Optional[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase):
# 1. predict noise model_output
a__: Dict = self.unet(lowercase , lowercase).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
a__: Optional[Any] = self.scheduler.step(
lowercase , lowercase , lowercase , eta=lowercase , use_clipped_model_output=lowercase , generator=lowercase , ).prev_sample
a__: Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1)
a__: Optional[int] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
a__: Dict = self.numpy_to_pil(lowercase)
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase)
| 290 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __snake_case ( unittest.TestCase , __lowerCAmelCase ):
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Any = load_tool('text-classification')
self.tool.setup()
a__: str = load_tool('text-classification' , remote=lowercase)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Any = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowercase , 'positive')
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: List[Any] = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowercase , 'positive')
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[int] = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowercase , 'positive')
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowercase , 'positive')
| 290 | """simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Optional[Any] = tempfile.mkdtemp()
a__: Optional[int] = SamImageProcessor()
a__: Tuple = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> List[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__: Optional[int] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__: List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Union[str, Any] = self.get_image_processor()
a__: List[Any] = SamProcessor(image_processor=lowercase)
a__: Optional[int] = self.prepare_image_inputs()
a__: Optional[Any] = image_processor(lowercase , return_tensors='np')
a__: Tuple = processor(images=lowercase , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_torch
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: int = self.get_image_processor()
a__: List[str] = SamProcessor(image_processor=lowercase)
a__: Optional[Any] = [torch.ones((1, 3, 5, 5))]
a__: Union[str, Any] = [[17_64, 26_46]]
a__: Optional[Any] = [[6_83, 10_24]]
a__: int = processor.post_process_masks(lowercase , lowercase , lowercase)
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Optional[int] = processor.post_process_masks(
lowercase , torch.tensor(lowercase) , torch.tensor(lowercase))
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
# should also work with np
a__: Dict = [np.ones((1, 3, 5, 5))]
a__: Tuple = processor.post_process_masks(lowercase , np.array(lowercase) , np.array(lowercase))
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Tuple = [[1, 0], [0, 1]]
with self.assertRaises(lowercase):
a__: List[Any] = processor.post_process_masks(lowercase , np.array(lowercase) , np.array(lowercase))
@require_vision
@require_tf
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = tempfile.mkdtemp()
a__: List[Any] = SamImageProcessor()
a__: Optional[int] = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> int:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Optional[int] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__: Dict = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__: Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[Any] = self.get_image_processor()
a__: str = SamProcessor(image_processor=lowercase)
a__: int = self.prepare_image_inputs()
a__: int = image_processor(lowercase , return_tensors='np')
a__: Dict = processor(images=lowercase , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_tf
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Tuple = self.get_image_processor()
a__: Any = SamProcessor(image_processor=lowercase)
a__: str = [tf.ones((1, 3, 5, 5))]
a__: List[Any] = [[17_64, 26_46]]
a__: List[Any] = [[6_83, 10_24]]
a__: List[Any] = processor.post_process_masks(lowercase , lowercase , lowercase , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Tuple = processor.post_process_masks(
lowercase , tf.convert_to_tensor(lowercase) , tf.convert_to_tensor(lowercase) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
# should also work with np
a__: Optional[Any] = [np.ones((1, 3, 5, 5))]
a__: int = processor.post_process_masks(
lowercase , np.array(lowercase) , np.array(lowercase) , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: List[str] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError):
a__: Any = processor.post_process_masks(
lowercase , np.array(lowercase) , np.array(lowercase) , return_tensors='tf')
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: str = tempfile.mkdtemp()
a__: int = SamImageProcessor()
a__: Union[str, Any] = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> Optional[int]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Any = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[int] = self.get_image_processor()
a__: int = SamProcessor(image_processor=lowercase)
a__: int = np.random.randint(0 , 2 , size=(1, 3, 5, 5)).astype(np.floataa)
a__: Dict = [tf.convert_to_tensor(lowercase)]
a__: Union[str, Any] = [torch.tensor(lowercase)]
a__: List[Any] = [[17_64, 26_46]]
a__: Optional[Any] = [[6_83, 10_24]]
a__: Tuple = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='tf')
a__: str = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='pt')
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Tuple = self.get_image_processor()
a__: Dict = SamProcessor(image_processor=lowercase)
a__: Any = self.prepare_image_inputs()
a__: List[Any] = image_processor(lowercase , return_tensors='pt')['pixel_values'].numpy()
a__: Tuple = processor(images=lowercase , return_tensors='pt')['pixel_values'].numpy()
a__: Any = image_processor(lowercase , return_tensors='tf')['pixel_values'].numpy()
a__: Any = processor(images=lowercase , return_tensors='tf')['pixel_values'].numpy()
self.assertTrue(np.allclose(lowercase , lowercase))
self.assertTrue(np.allclose(lowercase , lowercase))
self.assertTrue(np.allclose(lowercase , lowercase))
| 290 | 1 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class __snake_case :
def __init__( self , lowercase , lowercase , lowercase , lowercase=None , lowercase=None) -> List[str]:
'''simple docstring'''
a__: Any = start
a__: Tuple = end
a__: List[Any] = val
a__: Any = (start + end) // 2
a__: Tuple = left
a__: Tuple = right
def __repr__( self) -> Optional[int]:
'''simple docstring'''
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class __snake_case :
def __init__( self , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
a__: Optional[Any] = collection
a__: int = function
if self.collection:
a__: Optional[Any] = self._build_tree(0 , len(lowercase) - 1)
def lowerCamelCase_ ( self , lowercase , lowercase) -> int:
'''simple docstring'''
self._update_tree(self.root , lowercase , lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> Any:
'''simple docstring'''
return self._query_range(self.root , lowercase , lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> str:
'''simple docstring'''
if start == end:
return SegmentTreeNode(lowercase , lowercase , self.collection[start])
a__: Any = (start + end) // 2
a__: List[Any] = self._build_tree(lowercase , lowercase)
a__: int = self._build_tree(mid + 1 , lowercase)
return SegmentTreeNode(lowercase , lowercase , self.fn(left.val , right.val) , lowercase , lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
if node.start == i and node.end == i:
a__: Optional[Any] = val
return
if i <= node.mid:
self._update_tree(node.left , lowercase , lowercase)
else:
self._update_tree(node.right , lowercase , lowercase)
a__: str = self.fn(node.left.val , node.right.val)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Tuple:
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , lowercase , lowercase)
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , lowercase , node.mid) , self._query_range(node.right , node.mid + 1 , lowercase) , )
else:
# range in right child tree
return self._query_range(node.right , lowercase , lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
if self.root is not None:
a__: int = Queue()
queue.put(self.root)
while not queue.empty():
a__: int = queue.get()
yield node
if node.left is not None:
queue.put(node.left)
if node.right is not None:
queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
lowercase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 290 | """simple docstring"""
from math import pow, sqrt
def __a ( *_SCREAMING_SNAKE_CASE ) ->bool:
a__: Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) > 0 and all(value > 0.0 for value in values )
return result
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError('Input Error: Molar mass values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
| 290 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __snake_case :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[Any]:
'''simple docstring'''
a__: Any = parent
a__: Optional[int] = 13
a__: List[str] = 7
a__: Tuple = True
a__: int = True
a__: Any = True
a__: Dict = True
a__: Tuple = 99
a__: Dict = 3_84
a__: Optional[int] = 2
a__: Dict = 4
a__: str = 37
a__: Union[str, Any] = 'gelu'
a__: List[Any] = 0.1
a__: Tuple = 0.1
a__: Optional[int] = 5_12
a__: Any = 16
a__: Union[str, Any] = 2
a__: List[Any] = 0.02
a__: str = 3
a__: int = 4
a__: List[Any] = 1_28
a__: Tuple = 2
a__: Dict = 9
a__: Any = 1
a__: Dict = None
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__: Optional[Any] = None
if self.use_input_mask:
a__: int = random_attention_mask([self.batch_size, self.seq_length])
a__: Optional[int] = None
if self.use_token_type_ids:
a__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a__: Union[str, Any] = None
a__: Tuple = None
a__: Optional[int] = None
if self.use_labels:
a__: Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a__: Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
a__: Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowercase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__: int = TFConvBertModel(config=lowercase)
a__: int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
a__: List[Any] = [input_ids, input_mask]
a__: str = model(lowercase)
a__: Tuple = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: Union[str, Any] = TFConvBertForMaskedLM(config=lowercase)
a__: List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
a__: List[Any] = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = self.num_labels
a__: str = TFConvBertForSequenceClassification(config=lowercase)
a__: List[str] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
a__: List[Any] = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__: Optional[int] = self.num_choices
a__: int = TFConvBertForMultipleChoice(config=lowercase)
a__: str = tf.tile(tf.expand_dims(lowercase , 1) , (1, self.num_choices, 1))
a__: List[Any] = tf.tile(tf.expand_dims(lowercase , 1) , (1, self.num_choices, 1))
a__: Dict = tf.tile(tf.expand_dims(lowercase , 1) , (1, self.num_choices, 1))
a__: Any = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
a__: Optional[Any] = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
a__: Optional[Any] = self.num_labels
a__: Optional[int] = TFConvBertForTokenClassification(config=lowercase)
a__: Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
a__: str = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
a__: Tuple = TFConvBertForQuestionAnswering(config=lowercase)
a__: Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
a__: Tuple = model(lowercase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Dict = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
): List[str] = config_and_inputs
a__: int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a__ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a__ = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ = False
a__ = False
a__ = False
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: List[str] = TFConvBertModelTester(self)
a__: Optional[Any] = ConfigTester(self , config_class=lowercase , hidden_size=37)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase)
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__ , a__: Any = self.model_tester.prepare_config_and_inputs_for_common()
a__: Tuple = True
a__: int = True
if hasattr(lowercase , 'use_cache'):
a__: str = True
a__: List[str] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
a__: Any = getattr(self.model_tester , 'key_length' , lowercase)
for model_class in self.all_model_classes:
a__: Any = self._prepare_for_class(lowercase , lowercase)
a__: Any = model_class(lowercase)
a__: Union[str, Any] = len(model(lowercase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase , saved_model=lowercase)
a__: List[Any] = os.path.join(lowercase , 'saved_model' , '1')
a__: Tuple = tf.keras.models.load_model(lowercase)
a__: Optional[Any] = model(lowercase)
if self.is_encoder_decoder:
a__: Union[str, Any] = outputs['encoder_hidden_states']
a__: Tuple = outputs['encoder_attentions']
else:
a__: int = outputs['hidden_states']
a__: str = outputs['attentions']
self.assertEqual(len(lowercase) , lowercase)
a__: List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(lowercase) , lowercase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowercase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(lowercase)
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__ , a__: int = self.model_tester.prepare_config_and_inputs_for_common()
a__: Tuple = True
a__: Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
a__: List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
a__: Any = getattr(self.model_tester , 'key_length' , lowercase)
a__: Optional[int] = getattr(self.model_tester , 'key_length' , lowercase)
def check_decoder_attentions_output(lowercase):
a__: Union[str, Any] = len(lowercase)
self.assertEqual(out_len % 2 , 0)
a__: List[Any] = outputs.decoder_attentions
self.assertEqual(len(lowercase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowercase):
a__: List[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowercase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
a__: Optional[Any] = True
a__: Optional[Any] = False
a__: Union[str, Any] = model_class(lowercase)
a__: int = model(self._prepare_for_class(lowercase , lowercase))
a__: Any = len(lowercase)
self.assertEqual(config.output_hidden_states , lowercase)
check_encoder_attentions_output(lowercase)
if self.is_encoder_decoder:
a__: Dict = model_class(lowercase)
a__: Optional[Any] = model(self._prepare_for_class(lowercase , lowercase))
self.assertEqual(config.output_hidden_states , lowercase)
check_decoder_attentions_output(lowercase)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
a__: str = True
a__: Dict = model_class(lowercase)
a__: Dict = model(self._prepare_for_class(lowercase , lowercase))
self.assertEqual(config.output_hidden_states , lowercase)
check_encoder_attentions_output(lowercase)
# Check attention is always last and order is fine
a__: int = True
a__: List[str] = True
a__: Tuple = model_class(lowercase)
a__: int = model(self._prepare_for_class(lowercase , lowercase))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase))
self.assertEqual(model.config.output_hidden_states , lowercase)
check_encoder_attentions_output(lowercase)
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: int = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
a__: Dict = tf.constant([[0, 1, 2, 3, 4, 5]])
a__: str = model(lowercase)[0]
a__: int = [1, 6, 7_68]
self.assertEqual(output.shape , lowercase)
a__: int = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
])
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1e-4)
| 290 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class __snake_case ( __lowerCAmelCase ):
a__ = """roberta-prelayernorm"""
def __init__( self , lowercase=5_02_65 , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=None , **lowercase , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
a__: Union[str, Any] = vocab_size
a__: str = hidden_size
a__: Tuple = num_hidden_layers
a__: List[str] = num_attention_heads
a__: Dict = hidden_act
a__: int = intermediate_size
a__: Tuple = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: Tuple = max_position_embeddings
a__: Tuple = type_vocab_size
a__: Optional[Any] = initializer_range
a__: Tuple = layer_norm_eps
a__: Optional[int] = position_embedding_type
a__: Any = use_cache
a__: Dict = classifier_dropout
class __snake_case ( __lowerCAmelCase ):
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
a__: str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__: Union[str, Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 290 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __snake_case :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=[1, 1, 2] , lowercase=1 , lowercase=32 , lowercase=4 , lowercase=8 , lowercase=37 , lowercase="gelu_new" , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=5_12 , lowercase=3 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , lowercase=False , ) -> Tuple:
'''simple docstring'''
a__: Tuple = parent
a__: Any = batch_size
a__: List[Any] = seq_length
a__: Any = is_training
a__: Any = use_input_mask
a__: Union[str, Any] = use_token_type_ids
a__: Optional[Any] = use_labels
a__: List[Any] = vocab_size
a__: Dict = block_sizes
a__: List[str] = num_decoder_layers
a__: List[Any] = d_model
a__: Optional[Any] = n_head
a__: Dict = d_head
a__: str = d_inner
a__: Any = hidden_act
a__: Tuple = hidden_dropout
a__: str = attention_dropout
a__: Optional[Any] = activation_dropout
a__: List[Any] = max_position_embeddings
a__: int = type_vocab_size
a__: Dict = 2
a__: Dict = num_labels
a__: Dict = num_choices
a__: str = scope
a__: List[str] = initializer_std
# Used in the tests to check the size of the first attention layer
a__: Union[str, Any] = n_head
# Used in the tests to check the size of the first hidden state
a__: Optional[int] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
a__: Dict = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
a__: List[Any] = self.num_hidden_layers + 2
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__: Dict = None
if self.use_input_mask:
a__: Any = random_attention_mask([self.batch_size, self.seq_length])
a__: str = None
if self.use_token_type_ids:
a__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a__: int = None
a__: Dict = None
a__: str = None
if self.use_labels:
a__: Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a__: Tuple = ids_tensor([self.batch_size] , self.num_choices)
a__: Any = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[int]:
'''simple docstring'''
a__: Union[str, Any] = TFFunnelModel(config=lowercase)
a__: List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
a__: List[Any] = model(lowercase)
a__: int = [input_ids, input_mask]
a__: Dict = model(lowercase)
a__: Dict = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
a__: Dict = False
a__: str = TFFunnelModel(config=lowercase)
a__: List[Any] = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
a__: Optional[int] = False
a__: Optional[int] = TFFunnelModel(config=lowercase)
a__: Any = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Union[str, Any]:
'''simple docstring'''
a__: List[Any] = TFFunnelBaseModel(config=lowercase)
a__: int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
a__: int = model(lowercase)
a__: List[Any] = [input_ids, input_mask]
a__: str = model(lowercase)
a__: Any = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
a__: str = False
a__: Any = TFFunnelBaseModel(config=lowercase)
a__: int = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
a__: Union[str, Any] = False
a__: Tuple = TFFunnelBaseModel(config=lowercase)
a__: int = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> str:
'''simple docstring'''
a__: List[str] = TFFunnelForPreTraining(config=lowercase)
a__: int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
a__: List[Any] = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Dict:
'''simple docstring'''
a__: Optional[Any] = TFFunnelForMaskedLM(config=lowercase)
a__: Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
a__: str = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any:
'''simple docstring'''
a__: int = self.num_labels
a__: Tuple = TFFunnelForSequenceClassification(config=lowercase)
a__: Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
a__: Optional[Any] = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
'''simple docstring'''
a__: List[Any] = self.num_choices
a__: int = TFFunnelForMultipleChoice(config=lowercase)
a__: Tuple = tf.tile(tf.expand_dims(lowercase , 1) , (1, self.num_choices, 1))
a__: Tuple = tf.tile(tf.expand_dims(lowercase , 1) , (1, self.num_choices, 1))
a__: Union[str, Any] = tf.tile(tf.expand_dims(lowercase , 1) , (1, self.num_choices, 1))
a__: List[str] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
a__: List[Any] = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> str:
'''simple docstring'''
a__: Optional[Any] = self.num_labels
a__: str = TFFunnelForTokenClassification(config=lowercase)
a__: Dict = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
a__: Dict = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
'''simple docstring'''
a__: Tuple = TFFunnelForQuestionAnswering(config=lowercase)
a__: str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
a__: Optional[Any] = model(lowercase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Any = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
): int = config_and_inputs
a__: List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a__ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
a__ = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ = False
a__ = False
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Tuple = TFFunnelModelTester(self)
a__: Tuple = ConfigTester(self , config_class=lowercase)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase)
@require_tf
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
a__ = False
a__ = False
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: List[Any] = TFFunnelModelTester(self , base=lowercase)
a__: int = ConfigTester(self , config_class=lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase)
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __snake_case ( __lowerCAmelCase ):
a__ = """audio-spectrogram-transformer"""
def __init__( self , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-12 , lowercase=16 , lowercase=True , lowercase=10 , lowercase=10 , lowercase=10_24 , lowercase=1_28 , **lowercase , ) -> str:
'''simple docstring'''
super().__init__(**lowercase)
a__: Any = hidden_size
a__: int = num_hidden_layers
a__: Union[str, Any] = num_attention_heads
a__: Any = intermediate_size
a__: Union[str, Any] = hidden_act
a__: int = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: str = initializer_range
a__: Tuple = layer_norm_eps
a__: Any = patch_size
a__: int = qkv_bias
a__: Optional[Any] = frequency_stride
a__: int = time_stride
a__: List[str] = max_length
a__: Tuple = num_mel_bins
| 290 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __snake_case ( __lowerCAmelCase ):
a__ = """decision_transformer"""
a__ = ["""past_key_values"""]
a__ = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase=17 , lowercase=4 , lowercase=1_28 , lowercase=40_96 , lowercase=True , lowercase=1 , lowercase=10_24 , lowercase=3 , lowercase=1 , lowercase=None , lowercase="relu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=5_02_56 , lowercase=5_02_56 , lowercase=False , lowercase=False , **lowercase , ) -> Tuple:
'''simple docstring'''
a__: List[str] = state_dim
a__: int = act_dim
a__: List[Any] = hidden_size
a__: List[str] = max_ep_len
a__: List[Any] = action_tanh
a__: Optional[Any] = vocab_size
a__: Tuple = n_positions
a__: Dict = n_layer
a__: Optional[int] = n_head
a__: Optional[int] = n_inner
a__: Any = activation_function
a__: Union[str, Any] = resid_pdrop
a__: Any = embd_pdrop
a__: Any = attn_pdrop
a__: List[Any] = layer_norm_epsilon
a__: Optional[Any] = initializer_range
a__: Any = scale_attn_weights
a__: Dict = use_cache
a__: Optional[int] = scale_attn_by_inverse_layer_idx
a__: List[str] = reorder_and_upcast_attn
a__: Any = bos_token_id
a__: int = eos_token_id
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
| 290 | """simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir('fixtures/test_sentencepiece.model')
lowercase__ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
lowercase__ = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = CamembertTokenizer
a__ = CamembertTokenizerFast
a__ = True
a__ = True
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a__: Tuple = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = '<pad>'
a__: List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: str = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>NOTUSED')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , '<mask>')
self.assertEqual(len(lowercase) , 10_04)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_05)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Optional[Any] = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
a__: List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
a__: Dict = 'I was born in 92000, and this is falsé.'
a__: Optional[int] = tokenizer.encode(lowercase)
a__: Any = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Optional[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
a__: Tuple = tokenizer.convert_ids_to_tokens(lowercase)
a__: Tuple = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__: Dict = self.get_tokenizer()
a__: str = self.get_rust_tokenizer()
a__: int = 'I was born in 92000, and this is falsé.'
a__: Optional[Any] = tokenizer.tokenize(lowercase)
a__: List[Any] = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: str = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Tuple = self.get_rust_tokenizer()
a__: Union[str, Any] = tokenizer.encode(lowercase)
a__: List[Any] = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Union[str, Any] = {'input_ids': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
a__: int = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=lowercase , )
| 290 | 1 |
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[int]:
a__: List[str] = int(_SCREAMING_SNAKE_CASE )
# Initialize Result
a__: int = []
# Traverse through all denomination
for denomination in reversed(_SCREAMING_SNAKE_CASE ):
# Find denominations
while int(_SCREAMING_SNAKE_CASE ) >= int(_SCREAMING_SNAKE_CASE ):
total_value -= int(_SCREAMING_SNAKE_CASE )
answer.append(_SCREAMING_SNAKE_CASE ) # Append to the answer list
return answer
# Driver Code
if __name__ == "__main__":
lowercase__ = []
lowercase__ = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
lowercase__ = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(f"Denomination {i}: ").strip()))
lowercase__ = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
lowercase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
lowercase__ = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(f"Following is minimal change for {value}: ")
lowercase__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE = 1000000 ) ->int:
a__: int = limit + 1
a__: Optional[int] = [0] * limit
for first_term in range(1 , _SCREAMING_SNAKE_CASE ):
for n in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: List[Any] = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
a__: Any = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"{solution() = }")
| 290 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase__ = logging.get_logger(__name__)
class __snake_case ( __lowerCAmelCase ):
a__ = """AutoTokenizer"""
a__ = ["""tokenizer"""]
a__ = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self , lowercase , lowercase=None) -> str:
'''simple docstring'''
super().__init__(lowercase)
a__: Optional[Any] = speaker_embeddings
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase) -> Union[str, Any]:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
a__: Union[str, Any] = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop('subfolder' , lowercase) , cache_dir=kwargs.pop('cache_dir' , lowercase) , force_download=kwargs.pop('force_download' , lowercase) , proxies=kwargs.pop('proxies' , lowercase) , resume_download=kwargs.pop('resume_download' , lowercase) , local_files_only=kwargs.pop('local_files_only' , lowercase) , use_auth_token=kwargs.pop('use_auth_token' , lowercase) , revision=kwargs.pop('revision' , lowercase) , )
if speaker_embeddings_path is None:
logger.warning(
f'`{os.path.join(lowercase , lowercase)}` does not exist,\n no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.')
a__: str = None
else:
with open(lowercase) as speaker_embeddings_json:
a__: Tuple = json.load(lowercase)
else:
a__: Optional[int] = None
a__: int = AutoTokenizer.from_pretrained(lowercase , **lowercase)
return cls(tokenizer=lowercase , speaker_embeddings=lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ) -> List[str]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , 'v2') , exist_ok=lowercase)
a__: Optional[int] = {}
a__: Any = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
a__: int = self._load_voice_preset(lowercase)
a__: Optional[int] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , lowercase , f'{prompt_key}_{key}') , voice_preset[key] , allow_pickle=lowercase , )
a__: str = os.path.join(lowercase , f'{prompt_key}_{key}.npy')
a__: Optional[Any] = tmp_dict
with open(os.path.join(lowercase , lowercase) , 'w') as fp:
json.dump(lowercase , lowercase)
super().save_pretrained(lowercase , lowercase , **lowercase)
def lowerCamelCase_ ( self , lowercase = None , **lowercase) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = self.speaker_embeddings[voice_preset]
a__: int = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].')
a__: Any = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/') , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , lowercase) , cache_dir=kwargs.pop('cache_dir' , lowercase) , force_download=kwargs.pop('force_download' , lowercase) , proxies=kwargs.pop('proxies' , lowercase) , resume_download=kwargs.pop('resume_download' , lowercase) , local_files_only=kwargs.pop('local_files_only' , lowercase) , use_auth_token=kwargs.pop('use_auth_token' , lowercase) , revision=kwargs.pop('revision' , lowercase) , )
if path is None:
raise ValueError(
f'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/") , voice_preset_paths[key])}` does not exist,\n no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.')
a__: Dict = np.load(lowercase)
return voice_preset_dict
def lowerCamelCase_ ( self , lowercase = None) -> Dict:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'Voice preset unrecognized, missing {key} as a key.')
if not isinstance(voice_preset[key] , np.ndarray):
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.')
if len(voice_preset[key].shape) != self.preset_shape[key]:
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.')
def __call__( self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=2_56 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ) -> Optional[int]:
'''simple docstring'''
if voice_preset is not None and not isinstance(lowercase , lowercase):
if (
isinstance(lowercase , lowercase)
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
a__: List[str] = self._load_voice_preset(lowercase)
else:
if isinstance(lowercase , lowercase) and not voice_preset.endswith('.npz'):
a__: Optional[Any] = voice_preset + '.npz'
a__: int = np.load(lowercase)
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase)
a__: int = BatchFeature(data=lowercase , tensor_type=lowercase)
a__: int = self.tokenizer(
lowercase , return_tensors=lowercase , padding='max_length' , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
a__: Dict = voice_preset
return encoded_text
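# --- Editor's sketch (hypothetical usage; checkpoint name and preset id are assumptions) ---
# A Bark-style processor like the one above is normally loaded from a checkpoint
# and called with text plus an optional voice preset, roughly:
#     processor = BarkProcessor.from_pretrained("suno/bark-small")
#     inputs = processor("Hello world", voice_preset="v2/en_speaker_6")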
| 290 | """simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowercase__ = TypeVar('T')
lowercase__ = Union[List[T], Tuple[T, ...]]
lowercase__ = Union[T, List[T], Dict[str, T]]
lowercase__ = Union[str, bytes, os.PathLike]
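# --- Editor's note (illustrative; the alias names below are assumptions) ---
# Read these as ListLike[T] = a list or tuple of T, NestedDataStructureLike[T] = a T,
# a list of T, or a dict of str -> T, and PathLike = anything accepted as a filesystem path.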
| 290 | 1 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "cpu" , _SCREAMING_SNAKE_CASE = None ) ->None:
a__: List[str] = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
a__: Optional[Any] = v.half()
if save_path is None: # overwrite src_path
a__: Optional[Any] = src_path
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
fire.Fire(convert)
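# --- Editor's sketch (illustrative; file names are assumptions) ---
# The converter above is roughly equivalent to loading a saved state dict,
# casting every tensor to float16, and writing it back:
#     state = torch.load("pytorch_model.bin", map_location="cpu")
#     state = {k: v.half() for k, v in state.items()}
#     torch.save(state, "pytorch_model.fp16.bin")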
| 290 | """simple docstring"""
from math import pi, sqrt, tan
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
a__: List[Any] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_SCREAMING_SNAKE_CASE , 2 ) * torus_radius * tube_radius
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
a__: int = (sidea + sidea + sidea) / 2
a__: Tuple = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 290 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = KandinskyVaaControlnetPipeline
a__ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a__ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a__ = False
@property
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return 1_00
@property
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
torch.manual_seed(0)
a__: Union[str, Any] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
a__: Optional[int] = UNetaDConditionModel(**lowercase)
return model
@property
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
torch.manual_seed(0)
a__: str = VQModel(**self.dummy_movq_kwargs)
return model
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: str = self.dummy_unet
a__: List[Any] = self.dummy_movq
a__: str = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase , )
a__: Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase_ ( self , lowercase , lowercase=0) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase)).to(lowercase)
a__: Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowercase)
# create hint
a__: Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase)).to(lowercase)
if str(lowercase).startswith('mps'):
a__: List[str] = torch.manual_seed(lowercase)
else:
a__: Union[str, Any] = torch.Generator(device=lowercase).manual_seed(lowercase)
a__: Union[str, Any] = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: str = 'cpu'
a__: int = self.get_dummy_components()
a__: Optional[int] = self.pipeline_class(**lowercase)
a__: Any = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Optional[Any] = pipe(**self.get_dummy_inputs(lowercase))
a__: Any = output.images
a__: Any = pipe(
**self.get_dummy_inputs(lowercase) , return_dict=lowercase , )[0]
a__: Optional[Any] = image[0, -3:, -3:, -1]
a__: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__: List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
a__: Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png')
a__: int = torch.from_numpy(np.array(lowercase)).float() / 255.0
a__: Tuple = hint.permute(2 , 0 , 1).unsqueeze(0)
a__: List[str] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa)
pipe_prior.to(lowercase)
a__: Tuple = KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa)
a__: Union[str, Any] = pipeline.to(lowercase)
pipeline.set_progress_bar_config(disable=lowercase)
a__: Union[str, Any] = 'A robot, 4k photo'
a__: List[str] = torch.Generator(device='cuda').manual_seed(0)
a__ , a__: Union[str, Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
a__: Optional[Any] = torch.Generator(device='cuda').manual_seed(0)
a__: Optional[int] = pipeline(
image_embeds=lowercase , negative_image_embeds=lowercase , hint=lowercase , generator=lowercase , num_inference_steps=1_00 , output_type='np' , )
a__: Dict = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(lowercase , lowercase)
| 290 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase__ = random.Random()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
if rng is None:
a__: Any = global_rng
a__: int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __snake_case ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=4_00 , lowercase=20_00 , lowercase=1 , lowercase=0.0 , lowercase=1_60_00 , lowercase=True , lowercase=True , ) -> Union[str, Any]:
'''simple docstring'''
a__: Tuple = parent
a__: Optional[int] = batch_size
a__: Optional[Any] = min_seq_length
a__: Optional[int] = max_seq_length
a__: Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__: Dict = feature_size
a__: Any = padding_value
a__: Optional[Any] = sampling_rate
a__: Optional[Any] = return_attention_mask
a__: str = do_normalize
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase_ ( self , lowercase=False , lowercase=False) -> Tuple:
'''simple docstring'''
def _flatten(lowercase):
return list(itertools.chain(*lowercase))
if equal_length:
a__: Dict = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
a__: List[Any] = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
a__: str = [np.asarray(lowercase) for x in speech_inputs]
return speech_inputs
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = WavaVecaFeatureExtractor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[int] = WavaVecaFeatureExtractionTester(self)
def lowerCamelCase_ ( self , lowercase) -> List[Any]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowercase , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0) - 1) < 1e-3))
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
a__: Optional[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: List[str] = [np.asarray(lowercase) for speech_input in speech_inputs]
# Test not batched input
a__: Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='np').input_values
a__: Dict = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test batched
a__: Dict = feat_extract(lowercase , return_tensors='np').input_values
a__: int = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test 2-D numpy arrays are batched.
a__: int = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
a__: Union[str, Any] = np.asarray(lowercase)
a__: int = feat_extract(lowercase , return_tensors='np').input_values
a__: Any = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: List[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Optional[int] = ['longest', 'max_length', 'do_not_pad']
a__: List[Any] = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: Dict = feat_extract(lowercase , padding=lowercase , max_length=lowercase , return_tensors='np')
a__: Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self.assertTrue(input_values[0][8_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self.assertTrue(input_values[0][10_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Optional[int] = range(8_00 , 14_00 , 2_00)
a__: List[str] = [floats_list((1, x))[0] for x in lengths]
a__: Tuple = ['longest', 'max_length', 'do_not_pad']
a__: Dict = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: int = feat_extract(lowercase , max_length=lowercase , padding=lowercase)
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Any = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Dict = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='max_length' , return_tensors='np')
a__: int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: int = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: str = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='longest' , return_tensors='np')
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00))
a__: Dict = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Tuple = feat_extract(
lowercase , truncation=lowercase , max_length=20_00 , padding='longest' , return_tensors='np')
a__: str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00))
@require_torch
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
import torch
a__: Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Tuple = np.random.rand(1_00).astype(np.floataa)
a__: Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__: Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
a__: Optional[Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
@slow
@require_torch
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
a__: str = WavaVecaConfig.from_pretrained(lowercase)
a__: str = WavaVecaFeatureExtractor.from_pretrained(lowercase)
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer')
| 290 | 1 |
"""simple docstring"""
from __future__ import annotations
def __a ( _SCREAMING_SNAKE_CASE ) ->list[int]: # This function is recursive
a__: Optional[int] = len(_SCREAMING_SNAKE_CASE )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a__: Tuple = array[0]
a__: Tuple = False
a__: Any = 1
a__: list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
a__: Tuple = True
a__: str = [element for element in array[i:] if element >= array[i]]
a__: Dict = longest_subsequence(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > len(_SCREAMING_SNAKE_CASE ):
a__: Dict = temp_array
else:
i += 1
a__: List[Any] = [element for element in array[1:] if element >= pivot]
a__: int = [pivot, *longest_subsequence(_SCREAMING_SNAKE_CASE )]
if len(_SCREAMING_SNAKE_CASE ) > len(_SCREAMING_SNAKE_CASE ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
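# --- Editor's note (illustrative; added, not part of the original snippet) ---
# The recursion returns one longest non-decreasing subsequence; for the classic
# input [10, 22, 9, 33, 21, 50, 41, 60, 80] the answer has length 6, e.g.
# [10, 22, 33, 41, 60, 80]. A flat sanity check of that witness:
_witness = [10, 22, 33, 41, 60, 80]
assert len(_witness) == 6 and all(x <= y for x, y in zip(_witness, _witness[1:]))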
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __snake_case ( __lowerCAmelCase ):
a__ = """decision_transformer"""
a__ = ["""past_key_values"""]
a__ = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase=17 , lowercase=4 , lowercase=1_28 , lowercase=40_96 , lowercase=True , lowercase=1 , lowercase=10_24 , lowercase=3 , lowercase=1 , lowercase=None , lowercase="relu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=5_02_56 , lowercase=5_02_56 , lowercase=False , lowercase=False , **lowercase , ) -> Tuple:
'''simple docstring'''
a__: List[str] = state_dim
a__: int = act_dim
a__: List[Any] = hidden_size
a__: List[str] = max_ep_len
a__: List[Any] = action_tanh
a__: Optional[Any] = vocab_size
a__: Tuple = n_positions
a__: Dict = n_layer
a__: Optional[int] = n_head
a__: Optional[int] = n_inner
a__: Any = activation_function
a__: Union[str, Any] = resid_pdrop
a__: Any = embd_pdrop
a__: Any = attn_pdrop
a__: List[Any] = layer_norm_epsilon
a__: Optional[Any] = initializer_range
a__: Any = scale_attn_weights
a__: Dict = use_cache
a__: Optional[int] = scale_attn_by_inverse_layer_idx
a__: List[str] = reorder_and_upcast_attn
a__: Any = bos_token_id
a__: int = eos_token_id
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
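# --- Editor's sketch (illustrative; the dimension values are assumptions) ---
# A config like the one above is usually instantiated with the environment's
# observation and action sizes, roughly:
#     config = DecisionTransformerConfig(state_dim=17, act_dim=6, max_ep_len=1000)
#     model = DecisionTransformerModel(config)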
| 290 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
while a != 0:
a__ , a__: List[str] = b % a, a
return b
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
if gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) != 1:
a__: Dict = F'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: Union[str, Any] = 1, 0, a
a__ , a__ , a__: Any = 0, 1, m
while va != 0:
a__: int = ua // va
a__ , a__ , a__ , a__ , a__ , a__: Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
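# --- Editor's check (illustrative; added, not part of the original snippet) ---
# The extended-Euclid loop returns u % m with a * u == 1 (mod m); for example the
# inverse of 7 modulo 26 is 15, since 7 * 15 = 105 = 4 * 26 + 1.
assert (7 * 15) % 26 == 1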
| 290 | 1 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __snake_case ( unittest.TestCase ):
@require_torch
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: List[str] = pipeline(
task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused')
a__: Union[str, Any] = load_dataset('ashraq/esc50')
a__: List[str] = dataset['train']['audio'][-1]['array']
a__: str = audio_classifier(lowercase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
self.assertEqual(
nested_simplify(lowercase) , [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}] , )
@unittest.skip('No models are available in TF')
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[int] = pipeline(
task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , )
# This is an audio recording of a dog
a__: Dict = load_dataset('ashraq/esc50')
a__: Union[str, Any] = dataset['train']['audio'][-1]['array']
a__: str = audio_classifier(lowercase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
self.assertEqual(
nested_simplify(lowercase) , [
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
] , )
a__: List[Any] = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
self.assertEqual(
nested_simplify(lowercase) , [
[
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
a__: Optional[int] = audio_classifier(
[audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase) , [
[
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
@unittest.skip('No models are available in TF')
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
pass
| 290 | """simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase__ = logging.getLogger(__name__)
class __snake_case :
def __init__( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = False
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
if not self.initialized:
a__: Optional[int] = RagRetriever(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
a__: Optional[int] = True
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
self.retriever.index.init_index()
def lowerCamelCase_ ( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__ , a__: str = self.retriever._main_retrieve(lowercase , lowercase)
return doc_ids, retrieved_doc_embeds
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None) -> int:
'''simple docstring'''
if index is not None and index.is_initialized() and len(lowercase) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
super().__init__(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
a__: Any = retrieval_workers
if len(self.retrieval_workers) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase)
for worker in self.retrieval_workers
])
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
logger.info('initializing retrieval')
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def lowerCamelCase_ ( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
if len(self.retrieval_workers) > 0:
# Select a random retrieval actor.
a__: int = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
a__ , a__: List[Any] = ray.get(random_worker.retrieve.remote(lowercase , lowercase))
else:
a__ , a__: Dict = self._main_retrieve(lowercase , lowercase)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase)
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase=None , **lowercase) -> Tuple:
'''simple docstring'''
return super(lowercase , cls).get_tokenizers(lowercase , lowercase , **lowercase)
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase , lowercase=None , **lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[int] = kwargs.pop('config' , lowercase) or RagConfig.from_pretrained(lowercase , **lowercase)
a__: Union[str, Any] = RagTokenizer.from_pretrained(lowercase , config=lowercase)
a__: int = rag_tokenizer.question_encoder
a__: Any = rag_tokenizer.generator
if indexed_dataset is not None:
a__: List[Any] = 'custom'
a__: Optional[Any] = CustomHFIndex(config.retrieval_vector_size , lowercase)
else:
a__: Dict = cls._build_index(lowercase)
return cls(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
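# --- Editor's note (illustrative; class name and worker count are assumptions) ---
# The retrieval workers handed to the constructor above are Ray actor handles,
# typically created along the lines of
#     workers = [ray.remote(RayRetriever).remote() for _ in range(num_workers)]
# so each actor keeps its own retriever copy and lookups are fanned out remotely.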
| 290 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Optional[Any] = tempfile.mkdtemp()
a__: Optional[int] = SamImageProcessor()
a__: Tuple = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> List[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__: Optional[int] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__: List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Union[str, Any] = self.get_image_processor()
a__: List[Any] = SamProcessor(image_processor=lowercase)
a__: Optional[int] = self.prepare_image_inputs()
a__: Optional[Any] = image_processor(lowercase , return_tensors='np')
a__: Tuple = processor(images=lowercase , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_torch
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: int = self.get_image_processor()
a__: List[str] = SamProcessor(image_processor=lowercase)
a__: Optional[Any] = [torch.ones((1, 3, 5, 5))]
a__: Union[str, Any] = [[17_64, 26_46]]
a__: Optional[Any] = [[6_83, 10_24]]
a__: int = processor.post_process_masks(lowercase , lowercase , lowercase)
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Optional[int] = processor.post_process_masks(
lowercase , torch.tensor(lowercase) , torch.tensor(lowercase))
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
# should also work with np
a__: Dict = [np.ones((1, 3, 5, 5))]
a__: Tuple = processor.post_process_masks(lowercase , np.array(lowercase) , np.array(lowercase))
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Tuple = [[1, 0], [0, 1]]
with self.assertRaises(lowercase):
a__: List[Any] = processor.post_process_masks(lowercase , np.array(lowercase) , np.array(lowercase))
@require_vision
@require_tf
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = tempfile.mkdtemp()
a__: List[Any] = SamImageProcessor()
a__: Optional[int] = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> int:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Optional[int] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__: Dict = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__: Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[Any] = self.get_image_processor()
a__: str = SamProcessor(image_processor=lowercase)
a__: int = self.prepare_image_inputs()
a__: int = image_processor(lowercase , return_tensors='np')
a__: Dict = processor(images=lowercase , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_tf
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Tuple = self.get_image_processor()
a__: Any = SamProcessor(image_processor=lowercase)
a__: str = [tf.ones((1, 3, 5, 5))]
a__: List[Any] = [[17_64, 26_46]]
a__: List[Any] = [[6_83, 10_24]]
a__: List[Any] = processor.post_process_masks(lowercase , lowercase , lowercase , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Tuple = processor.post_process_masks(
lowercase , tf.convert_to_tensor(lowercase) , tf.convert_to_tensor(lowercase) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
# should also work with np
a__: Optional[Any] = [np.ones((1, 3, 5, 5))]
a__: int = processor.post_process_masks(
lowercase , np.array(lowercase) , np.array(lowercase) , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: List[str] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError):
a__: Any = processor.post_process_masks(
lowercase , np.array(lowercase) , np.array(lowercase) , return_tensors='tf')
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: str = tempfile.mkdtemp()
a__: int = SamImageProcessor()
a__: Union[str, Any] = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> Optional[int]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Any = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[int] = self.get_image_processor()
a__: int = SamProcessor(image_processor=lowercase)
a__: int = np.random.randint(0 , 2 , size=(1, 3, 5, 5)).astype(np.floataa)
a__: Dict = [tf.convert_to_tensor(lowercase)]
a__: Union[str, Any] = [torch.tensor(lowercase)]
a__: List[Any] = [[17_64, 26_46]]
a__: Optional[Any] = [[6_83, 10_24]]
a__: Tuple = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='tf')
a__: str = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='pt')
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Tuple = self.get_image_processor()
a__: Dict = SamProcessor(image_processor=lowercase)
a__: Any = self.prepare_image_inputs()
a__: List[Any] = image_processor(lowercase , return_tensors='pt')['pixel_values'].numpy()
a__: Tuple = processor(images=lowercase , return_tensors='pt')['pixel_values'].numpy()
a__: Any = image_processor(lowercase , return_tensors='tf')['pixel_values'].numpy()
a__: Any = processor(images=lowercase , return_tensors='tf')['pixel_values'].numpy()
self.assertTrue(np.allclose(lowercase , lowercase))
self.assertTrue(np.allclose(lowercase , lowercase))
self.assertTrue(np.allclose(lowercase , lowercase))
| 290 | """simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
a__: int = None
if token is not None:
a__: Tuple = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Optional[Any] = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
a__: str = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: str = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
a__: int = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: Dict = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Dict:
a__: Dict = None
if token is not None:
a__: List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Dict = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: List[Any] = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
a__: Dict = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: Optional[int] = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: List[Any] = None
if token is not None:
a__: Optional[int] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = result.headers['Location']
a__: Optional[int] = requests.get(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
a__: int = os.path.join(_SCREAMING_SNAKE_CASE , F'{artifact_name}.zip' )
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fp:
fp.write(response.content )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
a__: List[Any] = []
a__: Optional[Any] = []
a__: List[Any] = None
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_SCREAMING_SNAKE_CASE ) as f:
for line in f:
a__: Optional[int] = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
a__: Union[str, Any] = line[: line.index(': ' )]
a__: Union[str, Any] = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
a__: Optional[int] = line[len('FAILED ' ) :]
failed_tests.append(_SCREAMING_SNAKE_CASE )
elif filename == "job_name.txt":
a__: Union[str, Any] = line
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
F'`errors` and `failed_tests` should have the same number of elements. Got {len(_SCREAMING_SNAKE_CASE )} for `errors` '
F'and {len(_SCREAMING_SNAKE_CASE )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
' problem.' )
a__: Tuple = None
if job_name and job_links:
a__: Dict = job_links.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# A list with elements of the form (line of error, error, failed test)
a__: int = [x + [y] + [job_link] for x, y in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return result
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->str:
a__: int = []
a__: Optional[int] = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for p in os.listdir(_SCREAMING_SNAKE_CASE ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_SCREAMING_SNAKE_CASE , job_links=_SCREAMING_SNAKE_CASE ) )
return errors
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Any:
a__: str = Counter()
counter.update([x[1] for x in logs] )
a__: int = counter.most_common()
a__: Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
a__: List[str] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
a__: Optional[Any] = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: List[str] = test.split('::' )[0]
if test.startswith('tests/models/' ):
a__: Dict = test.split('/' )[2]
else:
a__: Any = None
return test
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[str]:
a__: int = [(x[0], x[1], get_model(x[2] )) for x in logs]
a__: List[Any] = [x for x in logs if x[2] is not None]
a__: Optional[Any] = {x[2] for x in logs}
a__: Dict = {}
for test in tests:
a__: Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
a__: Union[str, Any] = counter.most_common()
a__: List[str] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
a__: List[Any] = sum(error_counts.values() )
if n_errors > 0:
a__: Any = {'count': n_errors, 'errors': error_counts}
a__: Optional[int] = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: Any = '| no. | error | status |'
a__: Any = '|-:|:-|:-|'
a__: str = [header, sep]
for error in reduced_by_error:
a__: int = reduced_by_error[error]['count']
a__: Tuple = F'| {count} | {error[:100]} | |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
a__: List[str] = '| model | no. of errors | major error | count |'
a__: str = '|-:|-:|-:|-:|'
a__: int = [header, sep]
for model in reduced_by_model:
a__: Tuple = reduced_by_model[model]['count']
a__ , a__: Dict = list(reduced_by_model[model]['errors'].items() )[0]
a__: Dict = F'| {model} | {count} | {error[:60]} | {_count} |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowercase__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowercase__ = get_job_links(args.workflow_run_id, token=args.token)
lowercase__ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowercase__ = k.find(' / ')
lowercase__ = k[index + len(' / ') :]
lowercase__ = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowercase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowercase__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowercase__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowercase__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowercase__ = reduce_by_error(errors)
lowercase__ = reduce_by_model(errors)
lowercase__ = make_github_table(reduced_by_error)
lowercase__ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
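# --- Editor's note (illustrative output shape; the error text is a placeholder) ---
# make_github_table renders one markdown row per error, e.g.:
#     | no. | error | status |
#     |-:|:-|:-|
#     | 42 | <first 100 characters of the error message> | |
# and make_github_table_per_model adds a per-model breakdown with the top error.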
| 290 | 1 |
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 290 | """simple docstring"""
import math
def __a ( _SCREAMING_SNAKE_CASE ) ->bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
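# Illustration of the 6k +/- 1 shortcut used above (not part of the original snippet):
# every prime > 3 sits next to a multiple of 6, so trial division only needs candidates 5, 7, 11, 13, ...
# e.g. 29 = 6*5 - 1 is reported prime, while 25 fails at the first candidate divisor 5.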
def __a ( _SCREAMING_SNAKE_CASE = 0.1 ) ->int:
a__: str = 3
a__: Optional[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_SCREAMING_SNAKE_CASE )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | 1 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def __a ( _SCREAMING_SNAKE_CASE ) ->int:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: Optional[int] = F'Input value of [number={number}] must be an integer'
raise TypeError(_SCREAMING_SNAKE_CASE )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(_SCREAMING_SNAKE_CASE ) ) % 2 else 1
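# Illustration (assumes `prime_factors` returns factors with multiplicity):
# the sign above is -1 for 12 (12 = 2*2*3, three prime factors) and 1 for 9 (9 = 3*3, two prime factors).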
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase__ = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 290 | 1 |
"""simple docstring"""
import operator as op
lowercase__ = 'scaler.pt'
lowercase__ = 'pytorch_model'
lowercase__ = 'random_states'
lowercase__ = 'optimizer'
lowercase__ = 'scheduler'
lowercase__ = 'pytorch_model.bin'
lowercase__ = 'pytorch_model.bin.index.json'
lowercase__ = 'model.safetensors'
lowercase__ = 'model.safetensors.index.json'
lowercase__ = '1.10.2'
lowercase__ = 'py38'
lowercase__ = '4.17.0'
lowercase__ = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
lowercase__ = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
lowercase__ = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
lowercase__ = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
lowercase__ = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
lowercase__ = '2.0.1'
lowercase__ = ['pdsh', 'standard', 'openmpi', 'mvapich']
lowercase__ = ['default', 'reduce-overhead', 'max-autotune']
lowercase__ = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowercase__ = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
lowercase__ = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
lowercase__ = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
| 290 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = KandinskyInpaintPipeline
a__ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
a__ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
a__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a__ = False
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
return 1_00
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[int] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
torch.manual_seed(0)
a__: Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
a__: Optional[Any] = MultilingualCLIP(lowercase)
a__: int = text_encoder.eval()
return text_encoder
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = {
'in_channels': 9,
# Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
a__: str = UNetaDConditionModel(**lowercase)
return model
@property
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = VQModel(**self.dummy_movq_kwargs)
return model
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Dict = self.dummy_text_encoder
a__: int = self.dummy_tokenizer
a__: str = self.dummy_unet
a__: Any = self.dummy_movq
a__: Tuple = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase , )
a__: Tuple = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase_ ( self , lowercase , lowercase=0) -> Any:
'''simple docstring'''
a__: List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase)).to(lowercase)
a__: int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowercase)
# create init_image
a__: Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase)).to(lowercase)
a__: int = image.cpu().permute(0 , 2 , 3 , 1)[0]
a__: Optional[int] = Image.fromarray(np.uinta(lowercase)).convert('RGB').resize((2_56, 2_56))
# create mask
a__: Tuple = np.ones((64, 64) , dtype=np.floataa)
a__: Optional[Any] = 0
if str(lowercase).startswith('mps'):
a__: str = torch.manual_seed(lowercase)
else:
a__: Dict = torch.Generator(device=lowercase).manual_seed(lowercase)
a__: Optional[int] = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = 'cpu'
a__: List[Any] = self.get_dummy_components()
a__: Optional[Any] = self.pipeline_class(**lowercase)
a__: str = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Optional[int] = pipe(**self.get_dummy_inputs(lowercase))
a__: List[str] = output.images
a__: int = pipe(
**self.get_dummy_inputs(lowercase) , return_dict=lowercase , )[0]
a__: Optional[Any] = image[0, -3:, -3:, -1]
a__: List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}')
assert image.shape == (1, 64, 64, 3)
a__: str = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy')
a__: int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
a__: Union[str, Any] = np.ones((7_68, 7_68) , dtype=np.floataa)
a__: int = 0
a__: Optional[int] = 'a hat'
a__: int = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa)
pipe_prior.to(lowercase)
a__: Any = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa)
a__: Optional[Any] = pipeline.to(lowercase)
pipeline.set_progress_bar_config(disable=lowercase)
a__: Dict = torch.Generator(device='cpu').manual_seed(0)
a__ , a__: Optional[Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
a__: List[str] = pipeline(
lowercase , image=lowercase , mask_image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
a__: str = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase , lowercase)
| 290 | 1 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowercase__ = _symbol_database.Default()
lowercase__ = _descriptor_pool.Default().AddSerializedFile(
B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
lowercase__ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowercase__ = None
lowercase__ = B'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowercase__ = 45
lowercase__ = 1581
lowercase__ = 1517
lowercase__ = 1570
lowercase__ = 1584
lowercase__ = 1793
lowercase__ = 1795
lowercase__ = 1916
lowercase__ = 1864
lowercase__ = 1905
lowercase__ = 1919
lowercase__ = 2429
lowercase__ = 2208
lowercase__ = 2418
lowercase__ = 2323
lowercase__ = 2407
# @@protoc_insertion_point(module_scope)
| 290 | """simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowercase__ = logging.get_logger('transformers.models.encodec')
lowercase__ = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
lowercase__ = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
lowercase__ = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
lowercase__ = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
lowercase__ = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
lowercase__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowercase__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowercase__ = []
lowercase__ = []
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
for attribute in key.split('.' ):
a__: str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
a__: List[str] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
a__: Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
a__: str = value
elif weight_type == "weight_g":
a__: int = value
elif weight_type == "weight_v":
a__: Tuple = value
elif weight_type == "bias":
a__: Dict = value
elif weight_type == "running_mean":
a__: Any = value
elif weight_type == "running_var":
a__: Tuple = value
elif weight_type == "num_batches_tracked":
a__: List[str] = value
elif weight_type == "weight_ih_l0":
a__: List[Any] = value
elif weight_type == "weight_hh_l0":
a__: List[Any] = value
elif weight_type == "bias_ih_l0":
a__: List[Any] = value
elif weight_type == "bias_hh_l0":
a__: List[Any] = value
elif weight_type == "weight_ih_l1":
a__: int = value
elif weight_type == "weight_hh_l1":
a__: str = value
elif weight_type == "bias_ih_l1":
a__: Union[str, Any] = value
elif weight_type == "bias_hh_l1":
a__: Any = value
else:
a__: Union[str, Any] = value
logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
a__ , a__: Optional[Any] = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
a__: List[Any] = []
if model_name == "encodec_24khz" or model_name == "encodec_32khz":
a__: Optional[int] = MAPPING_24K
elif model_name == "encodec_48khz":
a__: List[Any] = MAPPING_48K
else:
raise ValueError(F'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info(F'{name} was ignored' )
continue
a__: int = False
for key, mapped_key in MAPPING.items():
if "*" in key:
a__ , a__: str = key.split('.*.' )
if prefix in name and suffix in name:
a__: List[str] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
a__: List[str] = True
if "*" in mapped_key:
a__: List[str] = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
a__: str = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
a__: int = 'weight_g'
elif "weight_v" in name:
a__: Dict = 'weight_v'
elif "weight_ih_l0" in name:
a__: int = 'weight_ih_l0'
elif "weight_hh_l0" in name:
a__: Union[str, Any] = 'weight_hh_l0'
elif "bias_ih_l0" in name:
a__: Optional[Any] = 'bias_ih_l0'
elif "bias_hh_l0" in name:
a__: Optional[int] = 'bias_hh_l0'
elif "weight_ih_l1" in name:
a__: Dict = 'weight_ih_l1'
elif "weight_hh_l1" in name:
a__: Optional[Any] = 'weight_hh_l1'
elif "bias_ih_l1" in name:
a__: List[str] = 'bias_ih_l1'
elif "bias_hh_l1" in name:
a__: Optional[Any] = 'bias_hh_l1'
elif "bias" in name:
a__: List[str] = 'bias'
elif "weight" in name:
a__: Any = 'weight'
elif "running_mean" in name:
a__: Dict = 'running_mean'
elif "running_var" in name:
a__: Dict = 'running_var'
elif "num_batches_tracked" in name:
a__: Dict = 'num_batches_tracked'
else:
a__: List[str] = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F'Unused weights: {unused_weights}' )
@torch.no_grad()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) ->int:
if config_path is not None:
a__: Dict = EncodecConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
a__: Tuple = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
a__: Any = [8, 5, 4, 4]
a__: List[str] = [2.2]
a__: List[Any] = 64
a__: Dict = 32000
a__: Union[str, Any] = 2048
a__: Union[str, Any] = False
a__: Any = False
a__: Optional[Any] = False
elif model_name == "encodec_48khz":
a__: Optional[int] = [8, 5, 4, 2]
a__: Union[str, Any] = [3.0, 6.0, 12.0, 24.0]
a__: List[str] = 48000
a__: Tuple = 2
a__: Optional[Any] = False
a__: Optional[int] = 'time_group_norm'
a__: Union[str, Any] = True
a__: Dict = 1.0
a__: str = 0.01
else:
raise ValueError(F'Unknown model name: {model_name}' )
a__: Optional[int] = EncodecModel(_SCREAMING_SNAKE_CASE )
a__: List[str] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
a__: int = torch.load(_SCREAMING_SNAKE_CASE )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
a__: str = original_checkpoint['best_state']
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
lowercase__ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
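# Example invocation (script name and output path are placeholders, for illustration only):
# python convert_encodec_checkpoint.py --model encodec_24khz \
#     --checkpoint_path encodec_24khz-d7cc33bc.th --pytorch_dump_folder_path ./encodec_24khz_converted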
| 290 | 1 |
"""simple docstring"""
lowercase__ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[str]:
a__: Dict = set()
# keep track of all the paths to be checked
a__: str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
a__: int = queue.pop(0 )
# get the last node from the path
a__: List[str] = path[-1]
if node not in explored:
a__: Tuple = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
a__: Dict = list(_SCREAMING_SNAKE_CASE )
new_path.append(_SCREAMING_SNAKE_CASE )
queue.append(_SCREAMING_SNAKE_CASE )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(_SCREAMING_SNAKE_CASE )
# in case there's no path between the 2 nodes
return []
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
a__: Optional[int] = [start]
a__: Union[str, Any] = set(_SCREAMING_SNAKE_CASE )
# Keep tabs on distances from the `start` node.
a__: List[str] = {start: 0, target: -1}
while queue:
a__: List[str] = queue.pop(0 )
if node == target:
a__: Union[str, Any] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(_SCREAMING_SNAKE_CASE )
queue.append(_SCREAMING_SNAKE_CASE )
a__: str = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
if height >= 1:
move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
move_disk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
print('moving disk from' , _SCREAMING_SNAKE_CASE , 'to' , _SCREAMING_SNAKE_CASE )
def __a ( ) ->List[str]:
a__: Dict = int(input('Height of hanoi: ' ).strip() )
move_tower(_SCREAMING_SNAKE_CASE , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
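# Note (illustrative): for a tower of height n the recursion above prints 2**n - 1 moves,
# e.g. height 3 produces 7 'moving disk from ... to ...' lines.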
| 290 | 1 |
"""simple docstring"""
from __future__ import annotations
lowercase__ = 8.988E9 # units = N * m^2 * C^-2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->dict[str, float]:
a__: List[str] = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if distance < 0:
raise ValueError('Distance cannot be negative' )
if force == 0:
a__: Dict = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
a__: int = abs(_SCREAMING_SNAKE_CASE ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
a__: Optional[Any] = abs(_SCREAMING_SNAKE_CASE ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
a__: List[Any] = (COULOMBS_CONSTANT * charge_product / abs(_SCREAMING_SNAKE_CASE )) ** 0.5
return {"distance": distance}
raise ValueError('Exactly one argument must be 0' )
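# Worked example (illustrative): with force=0, charges of 3 C and 5 C, and a distance of 2000 m,
# the function returns {'force': 33705.0}, since 8.988e9 * 15 / 2000**2 = 33705.0.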
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->str:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: Optional[int] = F'Expected string as input, found {type(_SCREAMING_SNAKE_CASE )}'
raise ValueError(_SCREAMING_SNAKE_CASE )
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: List[str] = F'Expected boolean as use_pascal parameter, found {type(_SCREAMING_SNAKE_CASE )}'
raise ValueError(_SCREAMING_SNAKE_CASE )
a__: int = input_str.split('_' )
a__: List[str] = 0 if use_pascal else 1
a__: List[str] = words[start_index:]
a__: List[str] = [word[0].upper() + word[1:] for word in words_to_capitalize]
a__: List[str] = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
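# Intended behaviour of the converter above (illustrative):
# 'some_random_string' becomes 'someRandomString', or 'SomeRandomString' when Pascal case is requested.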
if __name__ == "__main__":
from doctest import testmod
testmod()
| 290 | 1 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowercase__ = logging.get_logger(__name__)
class __snake_case ( __lowerCAmelCase ):
def __init__( self , **lowercase) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['bs4'])
super().__init__(**lowercase)
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__: List[str] = []
a__: Dict = []
a__: Any = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
a__: Union[str, Any] = parent.find_all(child.name , recursive=lowercase)
xpath_tags.append(child.name)
xpath_subscripts.append(
0 if 1 == len(lowercase) else next(i for i, s in enumerate(lowercase , 1) if s is child))
a__: List[str] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowerCamelCase_ ( self , lowercase) -> Any:
'''simple docstring'''
a__: Union[str, Any] = BeautifulSoup(lowercase , 'html.parser')
a__: Tuple = []
a__: List[str] = []
a__: Dict = []
for element in html_code.descendants:
if type(lowercase) == bsa.element.NavigableString:
if type(element.parent) != bsa.element.Tag:
continue
a__: int = html.unescape(lowercase).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(lowercase)
a__ , a__: Tuple = self.xpath_soup(lowercase)
stringaxtag_seq.append(lowercase)
stringaxsubs_seq.append(lowercase)
if len(lowercase) != len(lowercase):
raise ValueError('Number of doc strings and xtags does not correspond')
if len(lowercase) != len(lowercase):
raise ValueError('Number of doc strings and xsubs does not correspond')
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowerCamelCase_ ( self , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: Tuple = ''
for tagname, subs in zip(lowercase , lowercase):
xpath += f'/{tagname}'
if subs != 0:
xpath += f'[{subs}]'
return xpath
def __call__( self , lowercase) -> BatchFeature:
'''simple docstring'''
a__: Optional[Any] = False
# Check that strings has a valid type
if isinstance(lowercase , lowercase):
a__: Optional[int] = True
elif isinstance(lowercase , (list, tuple)):
if len(lowercase) == 0 or isinstance(html_strings[0] , lowercase):
a__: Tuple = True
if not valid_strings:
raise ValueError(
'HTML strings must of type `str`, `List[str]` (batch of examples), '
f'but is of type {type(lowercase)}.')
a__: Tuple = bool(isinstance(lowercase , (list, tuple)) and (isinstance(html_strings[0] , lowercase)))
if not is_batched:
a__: Tuple = [html_strings]
# Get nodes + xpaths
a__: int = []
a__: Optional[int] = []
for html_string in html_strings:
a__ , a__ , a__: Dict = self.get_three_from_single(lowercase)
nodes.append(lowercase)
a__: Dict = []
for node, tag_list, sub_list in zip(lowercase , lowercase , lowercase):
a__: Optional[int] = self.construct_xpath(lowercase , lowercase)
xpath_strings.append(lowercase)
xpaths.append(lowercase)
# return as Dict
a__: str = {'nodes': nodes, 'xpaths': xpaths}
a__: int = BatchFeature(data=lowercase , tensor_type=lowercase)
return encoded_inputs
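# Usage sketch (illustrative, assumes bs4 is installed): calling the extractor above on
# "<html><body><p>Hello world</p></body></html>" yields a BatchFeature with
# nodes == [['Hello world']] and xpaths == [['/html/body/p']].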
| 290 | """simple docstring"""
class __snake_case :
def __init__( self , lowercase , lowercase=None , lowercase=None) -> List[str]:
'''simple docstring'''
a__: Dict = data
a__: List[Any] = previous
a__: Any = next_node
def __str__( self) -> str:
'''simple docstring'''
return f'{self.data}'
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return self.data
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return self.next
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return self.previous
class __snake_case :
def __init__( self , lowercase) -> Dict:
'''simple docstring'''
a__: List[Any] = head
def __iter__( self) -> List[Any]:
'''simple docstring'''
return self
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
if not self.current:
raise StopIteration
else:
a__: Dict = self.current.get_data()
a__: Optional[Any] = self.current.get_next()
return value
class __snake_case :
def __init__( self) -> Dict:
'''simple docstring'''
a__: List[Any] = None # First node in list
a__: Optional[int] = None # Last node in list
def __str__( self) -> Optional[Any]:
'''simple docstring'''
a__: Dict = self.head
a__: Optional[Any] = []
while current is not None:
nodes.append(current.get_data())
a__: str = current.get_next()
return " ".join(str(lowercase) for node in nodes)
def __contains__( self , lowercase) -> Optional[int]:
'''simple docstring'''
a__: Optional[int] = self.head
while current:
if current.get_data() == value:
return True
a__: Dict = current.get_next()
return False
def __iter__( self) -> int:
'''simple docstring'''
return LinkedListIterator(self.head)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
if self.head is None:
a__: Optional[Any] = node
a__: Optional[Any] = node
else:
self.insert_before_node(self.head , lowercase)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
if self.head is None:
self.set_head(lowercase)
else:
self.insert_after_node(self.tail , lowercase)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: Tuple = Node(lowercase)
if self.head is None:
self.set_head(lowercase)
else:
self.set_tail(lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Union[str, Any] = node
a__: Optional[Any] = node.previous
if node.get_previous() is None:
a__: Tuple = node_to_insert
else:
a__: int = node_to_insert
a__: Optional[int] = node_to_insert
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Optional[int] = node
a__: Tuple = node.next
if node.get_next() is None:
a__: Optional[int] = node_to_insert
else:
a__: Any = node_to_insert
a__: str = node_to_insert
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Any = 1
a__: Tuple = Node(lowercase)
a__: Tuple = self.head
while node:
if current_position == position:
self.insert_before_node(lowercase , lowercase)
return
current_position += 1
a__: List[Any] = node.next
self.insert_after_node(self.tail , lowercase)
def lowerCamelCase_ ( self , lowercase) -> Node:
'''simple docstring'''
a__: Tuple = self.head
while node:
if node.get_data() == item:
return node
a__: List[str] = node.get_next()
raise Exception('Node not found')
def lowerCamelCase_ ( self , lowercase) -> Any:
'''simple docstring'''
if (node := self.get_node(lowercase)) is not None:
if node == self.head:
a__: Any = self.head.get_next()
if node == self.tail:
a__: List[Any] = self.tail.get_previous()
self.remove_node_pointers(lowercase)
@staticmethod
def lowerCamelCase_ ( lowercase) -> None:
'''simple docstring'''
if node.get_next():
a__: Any = node.previous
if node.get_previous():
a__: List[str] = node.next
a__: int = None
a__: Union[str, Any] = None
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return self.head is None
def __a ( ) ->None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | 1 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowercase__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __a ( _SCREAMING_SNAKE_CASE ) ->int:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
return max(metric_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for gt in ground_truths )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
a__: Optional[Any] = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , 'r' ).readlines()]
a__: Tuple = []
if args.gold_data_mode == "qa":
a__: Dict = pd.read_csv(_SCREAMING_SNAKE_CASE , sep='\t' , header=_SCREAMING_SNAKE_CASE )
for answer_list in data[1]:
a__: str = ast.literal_eval(_SCREAMING_SNAKE_CASE )
answers.append(_SCREAMING_SNAKE_CASE )
else:
a__: Union[str, Any] = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , 'r' ).readlines()]
a__: Any = [[reference] for reference in references]
a__: str = 0
for prediction, ground_truths in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
total += 1
em += metric_max_over_ground_truths(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
fa += metric_max_over_ground_truths(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: List[str] = 100.0 * em / total
a__: Tuple = 100.0 * fa / total
logger.info(F'F1: {fa:.2f}' )
logger.info(F'EM: {em:.2f}' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
a__: List[str] = args.k
a__: List[Any] = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , 'r' ).readlines()]
a__: Optional[int] = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , 'r' ).readlines()]
a__: List[str] = 0
for hypo, reference in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: Union[str, Any] = set(hypo.split('\t' )[:k] )
a__: List[Any] = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
a__: List[str] = 100.0 * em / total
logger.info(F'Precision@{k}: {em: .2f}' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any:
def strip_title(_SCREAMING_SNAKE_CASE ):
if title.startswith('"' ):
a__: Union[str, Any] = title[1:]
if title.endswith('"' ):
a__: Tuple = title[:-1]
return title
a__: List[str] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_SCREAMING_SNAKE_CASE , return_tensors='pt' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , )['input_ids'].to(args.device )
a__: Dict = rag_model.rag.question_encoder(_SCREAMING_SNAKE_CASE )
a__: int = question_enc_outputs[0]
a__: Optional[Any] = rag_model.retriever(
_SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
a__: Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
a__: str = []
for docs in all_docs:
a__: List[Any] = [strip_title(_SCREAMING_SNAKE_CASE ) for title in docs['title']]
provenance_strings.append('\t'.join(_SCREAMING_SNAKE_CASE ) )
return provenance_strings
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
with torch.no_grad():
a__: Union[str, Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_SCREAMING_SNAKE_CASE , return_tensors='pt' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
a__: Union[str, Any] = inputs_dict.input_ids.to(args.device )
a__: Tuple = inputs_dict.attention_mask.to(args.device )
a__: List[Any] = rag_model.generate( # rag_model overwrites generate
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
a__: List[str] = rag_model.retriever.generator_tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
if args.print_predictions:
for q, a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info('Q: {} - A: {}'.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
return answers
def __a ( ) ->List[Any]:
a__: List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=_SCREAMING_SNAKE_CASE , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=_SCREAMING_SNAKE_CASE , choices=['exact', 'compressed', 'legacy'] , type=_SCREAMING_SNAKE_CASE , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=_SCREAMING_SNAKE_CASE , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=_SCREAMING_SNAKE_CASE , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=_SCREAMING_SNAKE_CASE , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=_SCREAMING_SNAKE_CASE , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=_SCREAMING_SNAKE_CASE , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=_SCREAMING_SNAKE_CASE , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=_SCREAMING_SNAKE_CASE , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=_SCREAMING_SNAKE_CASE , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=_SCREAMING_SNAKE_CASE , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , )
a__: Dict = parser.parse_args()
a__: List[str] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def __a ( _SCREAMING_SNAKE_CASE ) ->List[str]:
a__: List[str] = {}
if args.model_type is None:
a__: Optional[Any] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
a__: Union[str, Any] = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
a__: Dict = args.n_docs
if args.index_name is not None:
a__: Any = args.index_name
if args.index_path is not None:
a__: Any = args.index_path
else:
a__: int = BartForConditionalGeneration
a__: List[Any] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , _SCREAMING_SNAKE_CASE )
a__: Any = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
a__: str = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(_SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(_SCREAMING_SNAKE_CASE ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
a__: int = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
a__: int = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , retriever=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
model.retriever.init_retrieval()
else:
a__: int = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
a__: Dict = []
for line in tqdm(_SCREAMING_SNAKE_CASE ):
questions.append(line.strip() )
if len(_SCREAMING_SNAKE_CASE ) == args.eval_batch_size:
a__: Optional[int] = evaluate_batch_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
preds_file.write('\n'.join(_SCREAMING_SNAKE_CASE ) + '\n' )
preds_file.flush()
a__: List[Any] = []
if len(_SCREAMING_SNAKE_CASE ) > 0:
a__: Union[str, Any] = evaluate_batch_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
preds_file.write('\n'.join(_SCREAMING_SNAKE_CASE ) )
preds_file.flush()
score_fn(_SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowercase__ = get_args()
main(args)
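# Example invocation (illustrative; file paths are placeholders, flags mirror the argparse above):
# python eval_rag.py --model_name_or_path facebook/rag-sequence-nq --model_type rag_sequence \
#     --evaluation_set test.source --gold_data_path gold_data.csv --gold_data_mode qa --predictions_path preds.txt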
| 290 | """simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __snake_case ( __lowerCAmelCase ):
a__ = 42
a__ = jnp.floataa
a__ = True
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
super().setup()
a__: int = nn.Dense(5 , dtype=self.dtype)
def __call__( self , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
a__: Dict = super().__call__(*lowercase , **lowercase)
a__: str = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class __snake_case ( __lowerCAmelCase ):
a__ = FlaxBigBirdForNaturalQuestionsModule
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
def cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
a__: Any = logits.shape[-1]
a__: List[Any] = (labels[..., None] == jnp.arange(_SCREAMING_SNAKE_CASE )[None]).astype('f4' )
a__: List[str] = jax.nn.log_softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
a__: Dict = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
a__: str = reduction(_SCREAMING_SNAKE_CASE )
return loss
a__: Tuple = partial(_SCREAMING_SNAKE_CASE , reduction=jnp.mean )
a__: List[str] = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Union[str, Any] = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Any = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __snake_case :
a__ = "google/bigbird-roberta-base"
a__ = 3000
a__ = 1_0500
a__ = 128
a__ = 3
a__ = 1
a__ = 5
# tx_args
a__ = 3e-5
a__ = 0.0
a__ = 2_0000
a__ = 0.0095
a__ = "bigbird-roberta-natural-questions"
a__ = "training-expt"
a__ = "data/nq-training.jsonl"
a__ = "data/nq-validation.jsonl"
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=lowercase)
a__: str = os.path.join(self.base_dir , self.save_dir)
a__: List[str] = self.batch_size_per_device * jax.device_count()
@dataclass
class __snake_case :
a__ = 42
a__ = 4096 # no dynamic padding on TPUs
def __call__( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: int = self.collate_fn(lowercase)
a__: Optional[int] = jax.tree_util.tree_map(lowercase , lowercase)
return batch
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__ , a__: Dict = self.fetch_inputs(features['input_ids'])
a__: List[Any] = {
'input_ids': jnp.array(lowercase , dtype=jnp.intaa),
'attention_mask': jnp.array(lowercase , dtype=jnp.intaa),
'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa),
'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa),
'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa),
}
return batch
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
a__: List[Any] = [self._fetch_inputs(lowercase) for ids in input_ids]
return zip(*lowercase)
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__: Union[str, Any] = [1 for _ in range(len(lowercase))]
while len(lowercase) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
if seed is not None:
a__: int = dataset.shuffle(seed=_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) // batch_size ):
a__: Union[str, Any] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_SCREAMING_SNAKE_CASE )
@partial(jax.pmap , axis_name='batch' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) ->Any:
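    # pmapped training step: pop the labels, compute the combined loss and its gradients, average both across devices, then apply the update.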
def loss_fn(_SCREAMING_SNAKE_CASE ):
a__: str = model_inputs.pop('start_labels' )
a__: Dict = model_inputs.pop('end_labels' )
a__: Optional[int] = model_inputs.pop('pooled_labels' )
a__: Optional[Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , dropout_rng=_SCREAMING_SNAKE_CASE , train=_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: Optional[int] = outputs
return state.loss_fn(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
a__ , a__: Union[str, Any] = jax.random.split(_SCREAMING_SNAKE_CASE )
a__: List[Any] = jax.value_and_grad(_SCREAMING_SNAKE_CASE )
a__ , a__: str = grad_fn(state.params )
a__: Optional[int] = jax.lax.pmean({'loss': loss} , axis_name='batch' )
a__: int = jax.lax.pmean(_SCREAMING_SNAKE_CASE , 'batch' )
a__: Union[str, Any] = state.apply_gradients(grads=_SCREAMING_SNAKE_CASE )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch' )
def __a ( _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) ->Optional[Any]:
a__: Optional[int] = model_inputs.pop('start_labels' )
a__: int = model_inputs.pop('end_labels' )
a__: Dict = model_inputs.pop('pooled_labels' )
a__: Union[str, Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=state.params , train=_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: int = outputs
a__: Optional[int] = state.loss_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Tuple = jax.lax.pmean({'loss': loss} , axis_name='batch' )
return metrics
class __snake_case ( train_state.TrainState ):
a__ = struct.field(pytree_node=__lowerCAmelCase )
@dataclass
class __snake_case :
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = None
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None) -> Optional[int]:
'''simple docstring'''
a__: Dict = model.params
a__: Any = TrainState.create(
apply_fn=model.__call__ , params=lowercase , tx=lowercase , loss_fn=lowercase , )
if ckpt_dir is not None:
a__ , a__ , a__ , a__ , a__: Any = restore_checkpoint(lowercase , lowercase)
a__: Any = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
a__ , a__: str = build_tx(**lowercase)
a__: Optional[Any] = train_state.TrainState(
step=lowercase , apply_fn=model.__call__ , params=lowercase , tx=lowercase , opt_state=lowercase , )
a__: int = args
a__: Union[str, Any] = data_collator
a__: Any = lr
a__: Dict = params
a__: Tuple = jax_utils.replicate(lowercase)
return state
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__: int = self.args
a__: str = len(lowercase) // args.batch_size
a__: Tuple = jax.random.PRNGKey(0)
a__: List[Any] = jax.random.split(lowercase , jax.device_count())
for epoch in range(args.max_epochs):
a__: str = jnp.array(0 , dtype=jnp.floataa)
a__: Tuple = get_batched_dataset(lowercase , args.batch_size , seed=lowercase)
a__: Optional[int] = 0
for batch in tqdm(lowercase , total=lowercase , desc=f'Running EPOCH-{epoch}'):
a__: List[str] = self.data_collator(lowercase)
a__ , a__ , a__: int = self.train_step_fn(lowercase , lowercase , **lowercase)
running_loss += jax_utils.unreplicate(metrics['loss'])
i += 1
if i % args.logging_steps == 0:
a__: List[Any] = jax_utils.unreplicate(state.step)
a__: Tuple = running_loss.item() / i
a__: Optional[Any] = self.scheduler_fn(state_step - 1)
a__: List[Any] = self.evaluate(lowercase , lowercase)
a__: List[str] = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(lowercase))
self.logger.log(lowercase , commit=lowercase)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__: Tuple = get_batched_dataset(lowercase , self.args.batch_size)
a__: Dict = len(lowercase) // self.args.batch_size
a__: Tuple = jnp.array(0 , dtype=jnp.floataa)
a__: List[Any] = 0
for batch in tqdm(lowercase , total=lowercase , desc='Evaluating ... '):
a__: str = self.data_collator(lowercase)
a__: List[str] = self.val_step_fn(lowercase , **lowercase)
running_loss += jax_utils.unreplicate(metrics['loss'])
i += 1
return running_loss / i
def lowerCamelCase_ ( self , lowercase , lowercase) -> Any:
'''simple docstring'''
a__: List[Any] = jax_utils.unreplicate(lowercase)
print(f'SAVING CHECKPOINT IN {save_dir}' , end=' ... ')
self.model_save_fn(lowercase , params=state.params)
with open(os.path.join(lowercase , 'opt_state.msgpack') , 'wb') as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(lowercase , 'args.joblib'))
joblib.dump(self.data_collator , os.path.join(lowercase , 'data_collator.joblib'))
with open(os.path.join(lowercase , 'training_state.json') , 'w') as f:
json.dump({'step': state.step.item()} , lowercase)
print('DONE')
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=' ... ' )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'flax_model.msgpack' ) , 'rb' ) as f:
a__: int = from_bytes(state.params , f.read() )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'opt_state.msgpack' ) , 'rb' ) as f:
a__: Optional[Any] = from_bytes(state.opt_state , f.read() )
a__: Optional[Any] = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , 'args.joblib' ) )
a__: int = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , 'data_collator.joblib' ) )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'training_state.json' ) , 'r' ) as f:
a__: Any = json.load(_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
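    # Piecewise learning-rate schedule: linear warmup (init_lr -> lr) over the warmup steps, then linear decay towards 1e-7 for the remaining steps.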
a__: str = num_train_steps - warmup_steps
a__: str = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=_SCREAMING_SNAKE_CASE , transition_steps=_SCREAMING_SNAKE_CASE )
a__: List[Any] = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=1e-7 , transition_steps=_SCREAMING_SNAKE_CASE )
a__: int = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
def weight_decay_mask(_SCREAMING_SNAKE_CASE ):
a__: List[Any] = traverse_util.flatten_dict(_SCREAMING_SNAKE_CASE )
a__: List[str] = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(_SCREAMING_SNAKE_CASE )
a__: List[str] = scheduler_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Any = optax.adamw(learning_rate=_SCREAMING_SNAKE_CASE , weight_decay=_SCREAMING_SNAKE_CASE , mask=_SCREAMING_SNAKE_CASE )
return tx, lr
| 290 | 1 |
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->tuple[float, float]:
# Check if the input is valid
if not len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
a__ , a__ , a__: Dict = equationa
a__ , a__ , a__: Dict = equationa
# Calculate the determinants of the matrices
a__: Union[str, Any] = aa * ba - aa * ba
a__: Dict = ca * ba - ca * ba
a__: Dict = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
a__: Union[str, Any] = determinant_x / determinant
a__: Any = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 290 | """simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowercase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return image
elif isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
a__: Optional[int] = [image]
a__: str = [trans(img.convert('RGB' ) ) for img in image]
a__: Any = torch.stack(_SCREAMING_SNAKE_CASE )
return image
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
a__: Dict = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=lowercase , scheduler=lowercase)
def lowerCamelCase_ ( self , lowercase) -> int:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}')
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: int = min(int(num_inference_steps * strength) , lowercase)
a__: Any = max(num_inference_steps - init_timestep , 0)
a__: Union[str, Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None) -> List[Any]:
'''simple docstring'''
if not isinstance(lowercase , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase)}')
a__: Tuple = image.to(device=lowercase , dtype=lowercase)
if isinstance(lowercase , lowercase) and len(lowercase) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(lowercase)}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
a__: List[str] = init_latents.shape
a__: List[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase)
# get latents
print('add noise to latents at timestep' , lowercase)
a__: int = self.scheduler.add_noise(lowercase , lowercase , lowercase)
a__: Dict = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase = None , lowercase = 0.8 , lowercase = 1 , lowercase = None , lowercase = 0.0 , lowercase = 50 , lowercase = None , lowercase = "pil" , lowercase = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowercase)
# 2. Preprocess image
a__: Tuple = preprocess(lowercase)
# 3. set timesteps
self.scheduler.set_timesteps(lowercase , device=self.device)
a__ , a__: Union[str, Any] = self.get_timesteps(lowercase , lowercase , self.device)
a__: Optional[int] = timesteps[:1].repeat(lowercase)
# 4. Prepare latent variables
a__: Union[str, Any] = self.prepare_latents(lowercase , lowercase , lowercase , self.unet.dtype , self.device , lowercase)
a__: Optional[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase):
# 1. predict noise model_output
a__: Dict = self.unet(lowercase , lowercase).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
a__: Optional[Any] = self.scheduler.step(
lowercase , lowercase , lowercase , eta=lowercase , use_clipped_model_output=lowercase , generator=lowercase , ).prev_sample
a__: Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1)
a__: Optional[int] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
a__: Dict = self.numpy_to_pil(lowercase)
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase)
| 290 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 290 | """simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Optional[Any] = tempfile.mkdtemp()
a__: Optional[int] = SamImageProcessor()
a__: Tuple = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> List[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__: Optional[int] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__: List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Union[str, Any] = self.get_image_processor()
a__: List[Any] = SamProcessor(image_processor=lowercase)
a__: Optional[int] = self.prepare_image_inputs()
a__: Optional[Any] = image_processor(lowercase , return_tensors='np')
a__: Tuple = processor(images=lowercase , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_torch
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: int = self.get_image_processor()
a__: List[str] = SamProcessor(image_processor=lowercase)
a__: Optional[Any] = [torch.ones((1, 3, 5, 5))]
a__: Union[str, Any] = [[17_64, 26_46]]
a__: Optional[Any] = [[6_83, 10_24]]
a__: int = processor.post_process_masks(lowercase , lowercase , lowercase)
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Optional[int] = processor.post_process_masks(
lowercase , torch.tensor(lowercase) , torch.tensor(lowercase))
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
# should also work with np
a__: Dict = [np.ones((1, 3, 5, 5))]
a__: Tuple = processor.post_process_masks(lowercase , np.array(lowercase) , np.array(lowercase))
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Tuple = [[1, 0], [0, 1]]
with self.assertRaises(lowercase):
a__: List[Any] = processor.post_process_masks(lowercase , np.array(lowercase) , np.array(lowercase))
@require_vision
@require_tf
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = tempfile.mkdtemp()
a__: List[Any] = SamImageProcessor()
a__: Optional[int] = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> int:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Optional[int] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__: Dict = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__: Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[Any] = self.get_image_processor()
a__: str = SamProcessor(image_processor=lowercase)
a__: int = self.prepare_image_inputs()
a__: int = image_processor(lowercase , return_tensors='np')
a__: Dict = processor(images=lowercase , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_tf
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Tuple = self.get_image_processor()
a__: Any = SamProcessor(image_processor=lowercase)
a__: str = [tf.ones((1, 3, 5, 5))]
a__: List[Any] = [[17_64, 26_46]]
a__: List[Any] = [[6_83, 10_24]]
a__: List[Any] = processor.post_process_masks(lowercase , lowercase , lowercase , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Tuple = processor.post_process_masks(
lowercase , tf.convert_to_tensor(lowercase) , tf.convert_to_tensor(lowercase) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
# should also work with np
a__: Optional[Any] = [np.ones((1, 3, 5, 5))]
a__: int = processor.post_process_masks(
lowercase , np.array(lowercase) , np.array(lowercase) , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: List[str] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError):
a__: Any = processor.post_process_masks(
lowercase , np.array(lowercase) , np.array(lowercase) , return_tensors='tf')
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: str = tempfile.mkdtemp()
a__: int = SamImageProcessor()
a__: Union[str, Any] = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> Optional[int]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Any = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[int] = self.get_image_processor()
a__: int = SamProcessor(image_processor=lowercase)
a__: int = np.random.randint(0 , 2 , size=(1, 3, 5, 5)).astype(np.floataa)
a__: Dict = [tf.convert_to_tensor(lowercase)]
a__: Union[str, Any] = [torch.tensor(lowercase)]
a__: List[Any] = [[17_64, 26_46]]
a__: Optional[Any] = [[6_83, 10_24]]
a__: Tuple = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='tf')
a__: str = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='pt')
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Tuple = self.get_image_processor()
a__: Dict = SamProcessor(image_processor=lowercase)
a__: Any = self.prepare_image_inputs()
a__: List[Any] = image_processor(lowercase , return_tensors='pt')['pixel_values'].numpy()
a__: Tuple = processor(images=lowercase , return_tensors='pt')['pixel_values'].numpy()
a__: Any = image_processor(lowercase , return_tensors='tf')['pixel_values'].numpy()
a__: Any = processor(images=lowercase , return_tensors='tf')['pixel_values'].numpy()
self.assertTrue(np.allclose(lowercase , lowercase))
self.assertTrue(np.allclose(lowercase , lowercase))
self.assertTrue(np.allclose(lowercase , lowercase))
| 290 | 1 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
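    # Randomised in-place quicksort; returns the number of comparisons performed while partitioning.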
a__: Tuple = 0
if start < end:
a__: Dict = randint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Tuple = a[end]
a__: int = a[pivot]
a__: Union[str, Any] = temp
a__ , a__: Dict = _in_place_partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
count += _in_place_quick_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , p - 1 )
count += _in_place_quick_sort(_SCREAMING_SNAKE_CASE , p + 1 , _SCREAMING_SNAKE_CASE )
return count
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
a__: Tuple = 0
a__: Optional[int] = randint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Tuple = a[end]
a__: Any = a[pivot]
a__: Optional[int] = temp
a__: Tuple = start - 1
for index in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
a__: Dict = new_pivot_index + 1
a__: Optional[int] = a[new_pivot_index]
a__: List[Any] = a[index]
a__: Any = temp
a__: Tuple = a[new_pivot_index + 1]
a__: Union[str, Any] = a[end]
a__: List[Any] = temp
return new_pivot_index + 1, count
lowercase__ = TemporaryFile()
lowercase__ = 100 # 100 elements are to be sorted
lowercase__ , lowercase__ = 0, 1 # mean and standard deviation
lowercase__ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
lowercase__ = np.load(outfile)
lowercase__ = len(M) - 1
lowercase__ = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution '
'is :'
)
print(z)
| 290 | """simple docstring"""
from math import pow, sqrt
def __a ( *_SCREAMING_SNAKE_CASE ) ->bool:
a__: Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) > 0 and all(value > 0.0 for value in values )
return result
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        else ValueError('Input Error: Molar mass values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
| 290 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: List[Any] = 1
a__: Optional[int] = 3
a__: Optional[Any] = (32, 32)
a__: Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(lowercase)
return image
@property
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
torch.manual_seed(0)
a__: int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
torch.manual_seed(0)
a__: Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0)
a__: Optional[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(lowercase)
@property
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
def extract(*lowercase , **lowercase):
class __snake_case :
def __init__( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = torch.ones([0])
def lowerCamelCase_ ( self , lowercase) -> int:
'''simple docstring'''
self.pixel_values.to(lowercase)
return self
return Out()
return extract
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__: int = self.dummy_cond_unet
a__: List[Any] = PNDMScheduler(skip_prk_steps=lowercase)
a__: Optional[int] = self.dummy_vae
a__: str = self.dummy_text_encoder
a__: List[Any] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
a__: Any = 77
a__: str = self.dummy_image.to(lowercase)
a__: str = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
a__: Tuple = AltDiffusionImgaImgPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
a__: List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase)
a__: str = alt_pipe.to(lowercase)
alt_pipe.set_progress_bar_config(disable=lowercase)
a__: Any = 'A painting of a squirrel eating a burger'
a__: Dict = torch.Generator(device=lowercase).manual_seed(0)
a__: str = alt_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=lowercase , )
a__: Any = output.images
a__: Dict = torch.Generator(device=lowercase).manual_seed(0)
a__: List[str] = alt_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=lowercase , return_dict=lowercase , )[0]
a__: Union[str, Any] = image[0, -3:, -3:, -1]
a__: List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a__: List[Any] = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Any = self.dummy_cond_unet
a__: str = PNDMScheduler(skip_prk_steps=lowercase)
a__: List[str] = self.dummy_vae
a__: Tuple = self.dummy_text_encoder
a__: Union[str, Any] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
a__: Union[str, Any] = 77
a__: Tuple = self.dummy_image.to(lowercase)
# put models in fp16
a__: str = unet.half()
a__: Any = vae.half()
a__: int = bert.half()
# make sure here that pndm scheduler skips prk
a__: Any = AltDiffusionImgaImgPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
a__: Optional[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase)
a__: Optional[int] = alt_pipe.to(lowercase)
alt_pipe.set_progress_bar_config(disable=lowercase)
a__: List[Any] = 'A painting of a squirrel eating a burger'
a__: int = torch.manual_seed(0)
a__: Any = alt_pipe(
[prompt] , generator=lowercase , num_inference_steps=2 , output_type='np' , image=lowercase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
# resize to resolution that is divisible by 8 but not 16 or 32
a__: int = init_image.resize((7_60, 5_04))
a__: Any = 'BAAI/AltDiffusion'
a__: List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowercase , safety_checker=lowercase , )
pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
pipe.enable_attention_slicing()
a__: str = 'A fantasy landscape, trending on artstation'
a__: int = torch.manual_seed(0)
a__: List[str] = pipe(
prompt=lowercase , image=lowercase , strength=0.75 , guidance_scale=7.5 , generator=lowercase , output_type='np' , )
a__: str = output.images[0]
a__: Optional[int] = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
a__: Union[str, Any] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
a__: int = init_image.resize((7_68, 5_12))
a__: List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy')
a__: Any = 'BAAI/AltDiffusion'
a__: str = AltDiffusionImgaImgPipeline.from_pretrained(
lowercase , safety_checker=lowercase , )
pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
pipe.enable_attention_slicing()
a__: Optional[int] = 'A fantasy landscape, trending on artstation'
a__: Union[str, Any] = torch.manual_seed(0)
a__: Tuple = pipe(
prompt=lowercase , image=lowercase , strength=0.75 , guidance_scale=7.5 , generator=lowercase , output_type='np' , )
a__: Any = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image).max() < 1e-2
| 290 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class __snake_case ( __lowerCAmelCase ):
a__ = """roberta-prelayernorm"""
def __init__( self , lowercase=5_02_65 , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=None , **lowercase , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
a__: Union[str, Any] = vocab_size
a__: str = hidden_size
a__: Tuple = num_hidden_layers
a__: List[str] = num_attention_heads
a__: Dict = hidden_act
a__: int = intermediate_size
a__: Tuple = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: Tuple = max_position_embeddings
a__: Tuple = type_vocab_size
a__: Optional[Any] = initializer_range
a__: Tuple = layer_norm_eps
a__: Optional[int] = position_embedding_type
a__: Any = use_cache
a__: Dict = classifier_dropout
class __snake_case ( __lowerCAmelCase ):
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
a__: str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__: Union[str, Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 290 | 1 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
lowercase__ = '__DUMMY_TRANSFORMERS_USER__'
lowercase__ = 'Dummy User'
lowercase__ = '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'
lowercase__ = 'https://hub-ci.huggingface.co'
lowercase__ = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
lowercase__ = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
lowercase__ = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def __a ( _SCREAMING_SNAKE_CASE ) ->List[str]:
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , _SCREAMING_SNAKE_CASE )
@pytest.fixture
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , _SCREAMING_SNAKE_CASE )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , _SCREAMING_SNAKE_CASE )
@pytest.fixture
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , _SCREAMING_SNAKE_CASE )
@pytest.fixture
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def __a ( ) ->Optional[int]:
return HfApi(endpoint=_SCREAMING_SNAKE_CASE )
@pytest.fixture(scope='session' )
def __a ( _SCREAMING_SNAKE_CASE ) ->Optional[int]:
a__: Union[str, Any] = HfFolder.get_token()
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
def _cleanup_repo(_SCREAMING_SNAKE_CASE ):
hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def __a ( _SCREAMING_SNAKE_CASE ) ->List[str]:
@contextmanager
def _temporary_repo(_SCREAMING_SNAKE_CASE ):
try:
yield repo_id
finally:
cleanup_repo(_SCREAMING_SNAKE_CASE )
return _temporary_repo
@pytest.fixture(scope='session' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
a__: List[Any] = F'repo_txt_data-{int(time.time() * 10e3 )}'
a__: str = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type='dataset' , private=_SCREAMING_SNAKE_CASE )
hf_api.upload_file(
token=_SCREAMING_SNAKE_CASE , path_or_fileobj=str(_SCREAMING_SNAKE_CASE ) , path_in_repo='data/text_data.txt' , repo_id=_SCREAMING_SNAKE_CASE , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
a__: Optional[Any] = F'repo_zipped_txt_data-{int(time.time() * 10e3 )}'
a__: Any = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type='dataset' , private=_SCREAMING_SNAKE_CASE )
hf_api.upload_file(
token=_SCREAMING_SNAKE_CASE , path_or_fileobj=str(_SCREAMING_SNAKE_CASE ) , path_in_repo='data.zip' , repo_id=_SCREAMING_SNAKE_CASE , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
a__: str = F'repo_zipped_img_data-{int(time.time() * 10e3 )}'
a__: List[Any] = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type='dataset' , private=_SCREAMING_SNAKE_CASE )
hf_api.upload_file(
token=_SCREAMING_SNAKE_CASE , path_or_fileobj=str(_SCREAMING_SNAKE_CASE ) , path_in_repo='data.zip' , repo_id=_SCREAMING_SNAKE_CASE , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
return hf_private_dataset_repo_zipped_img_data_
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __snake_case ( __lowerCAmelCase ):
a__ = """audio-spectrogram-transformer"""
def __init__( self , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-12 , lowercase=16 , lowercase=True , lowercase=10 , lowercase=10 , lowercase=10_24 , lowercase=1_28 , **lowercase , ) -> str:
'''simple docstring'''
super().__init__(**lowercase)
a__: Any = hidden_size
a__: int = num_hidden_layers
a__: Union[str, Any] = num_attention_heads
a__: Any = intermediate_size
a__: Union[str, Any] = hidden_act
a__: int = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: str = initializer_range
a__: Tuple = layer_norm_eps
a__: Any = patch_size
a__: int = qkv_bias
a__: Optional[Any] = frequency_stride
a__: int = time_stride
a__: List[str] = max_length
a__: Tuple = num_mel_bins
| 290 | 1 |
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
a__: Any = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def __a ( _SCREAMING_SNAKE_CASE ) ->dict[str, str]:
a__: str = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
a__: str = remove_duplicates(key.upper() )
a__: Dict = len(_SCREAMING_SNAKE_CASE )
# First fill cipher with key characters
a__: Optional[int] = {alphabet[i]: char for i, char in enumerate(_SCREAMING_SNAKE_CASE )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_SCREAMING_SNAKE_CASE ) , 26 ):
a__: Any = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
a__: Any = alphabet[i - offset]
a__: Any = char
return cipher_alphabet
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
return "".join(cipher_map.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for ch in message.upper() )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: str = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for ch in message.upper() )
def __a ( ) ->None:
a__: List[Any] = input('Enter message to encode or decode: ' ).strip()
a__: Union[str, Any] = input('Enter keyword: ' ).strip()
a__: Optional[Any] = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
a__: Optional[int] = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
a__: Dict = create_cipher_map(_SCREAMING_SNAKE_CASE )
print(func(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 290 | """simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir('fixtures/test_sentencepiece.model')
lowercase__ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
lowercase__ = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = CamembertTokenizer
a__ = CamembertTokenizerFast
a__ = True
a__ = True
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a__: Tuple = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = '<pad>'
a__: List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: str = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>NOTUSED')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , '<mask>')
self.assertEqual(len(lowercase) , 10_04)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_05)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Optional[Any] = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
a__: List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
a__: Dict = 'I was born in 92000, and this is falsé.'
a__: Optional[int] = tokenizer.encode(lowercase)
a__: Any = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Optional[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
a__: Tuple = tokenizer.convert_ids_to_tokens(lowercase)
a__: Tuple = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__: Dict = self.get_tokenizer()
a__: str = self.get_rust_tokenizer()
a__: int = 'I was born in 92000, and this is falsé.'
a__: Optional[Any] = tokenizer.tokenize(lowercase)
a__: List[Any] = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: str = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Tuple = self.get_rust_tokenizer()
a__: Union[str, Any] = tokenizer.encode(lowercase)
a__: List[Any] = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Union[str, Any] = {'input_ids': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
a__: int = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=lowercase , )
| 290 | 1 |
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE ) ->bool:
a__: Any = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE = 1000000 ) ->int:
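    # For every n below the limit, tally the (first_term, common_difference) pairs that satisfy the constraints, then return how many n are hit exactly ten times.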
a__: int = limit + 1
a__: Optional[int] = [0] * limit
for first_term in range(1 , _SCREAMING_SNAKE_CASE ):
for n in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: List[Any] = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
a__: Any = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"{solution() = }")
| 290 | 1 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowercase__ = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
lowercase__ = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
lowercase__ = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase=None , lowercase=True , lowercase=False) -> Union[str, Any]:
'''simple docstring'''
if rouge_types is None:
a__: Union[str, Any] = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
a__: Any = rouge_scorer.RougeScorer(rouge_types=lowercase , use_stemmer=lowercase)
if use_aggregator:
a__: Dict = scoring.BootstrapAggregator()
else:
a__: Optional[Any] = []
for ref, pred in zip(lowercase , lowercase):
a__: List[str] = scorer.score(lowercase , lowercase)
if use_aggregator:
aggregator.add_scores(lowercase)
else:
scores.append(lowercase)
if use_aggregator:
a__: List[str] = aggregator.aggregate()
else:
a__: Any = {}
for key in scores[0]:
a__: Optional[Any] = [score[key] for score in scores]
return result
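if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the metric class): the metric
    # above is a thin wrapper around `rouge_score`, so a single prediction/reference pair
    # can also be scored with the underlying scorer directly.
    demo_scorer = rouge_scorer.RougeScorer(rouge_types=['rouge1', 'rougeL'], use_stemmer=True)
    demo_scores = demo_scorer.score('hello there', 'hello there')
    print(demo_scores['rouge1'].fmeasure)  # expected: 1.0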
| 290 | """simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowercase__ = TypeVar('T')
lowercase__ = Union[List[T], Tuple[T, ...]]
lowercase__ = Union[T, List[T], Dict[str, T]]
lowercase__ = Union[str, bytes, os.PathLike]
| 290 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def __a ( _SCREAMING_SNAKE_CASE=None ) ->Union[str, Any]:
if subparsers is not None:
a__: Optional[int] = subparsers.add_parser('tpu-config' , description=_description )
else:
a__: Optional[Any] = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
a__: Dict = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=_SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=_SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
a__: List[Any] = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=_SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def __a ( _SCREAMING_SNAKE_CASE ) ->Dict:
a__: List[str] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_SCREAMING_SNAKE_CASE ):
a__: Tuple = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
a__: Optional[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
a__: List[Any] = defaults.commands
if not args.tpu_name:
a__: Optional[int] = defaults.tpu_name
if not args.tpu_zone:
a__: Tuple = defaults.tpu_zone
if args.accelerate_version == "dev":
a__: Any = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
a__: int = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , _SCREAMING_SNAKE_CASE ):
a__: Tuple = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
a__: Union[str, Any] = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _SCREAMING_SNAKE_CASE ):
a__: int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
a__: Tuple = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
a__: List[Any] = '; '.join(_SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
a__: List[str] = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(_SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(_SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def __a ( ) ->str:
a__: Optional[int] = tpu_command_parser()
a__: Any = parser.parse_args()
tpu_command_launcher(_SCREAMING_SNAKE_CASE )
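# Example invocation (sketch only; assumes this module is registered as the
# `accelerate tpu-config` sub-command, with placeholder TPU name/zone values):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --install_accelerate --debug
# With --debug the assembled `gcloud ... compute tpus tpu-vm ssh` command is only
# printed, which is a safe way to inspect what would run on the pod.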
| 290 | """simple docstring"""
from math import pi, sqrt, tan
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
a__: List[Any] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_SCREAMING_SNAKE_CASE , 2 ) * torus_radius * tube_radius
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
a__: int = (sidea + sidea + sidea) / 2
a__: Tuple = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
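    # Worked check (illustration only): for the 5-12-13 right triangle printed above,
    # Heron's formula gives s = (5 + 12 + 13) / 2 = 15 and
    # area = sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30, matching (5 * 12) / 2 = 30.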
| 290 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCAmelCase ):
a__ = (DEISMultistepScheduler,)
a__ = (("""num_inference_steps""", 25),)
def lowerCamelCase_ ( self , **lowercase) -> List[Any]:
'''simple docstring'''
a__: Dict = {
'num_train_timesteps': 10_00,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
}
config.update(**lowercase)
return config
def lowerCamelCase_ ( self , lowercase=0 , **lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: List[Any] = dict(self.forward_default_kwargs)
a__: int = kwargs.pop('num_inference_steps' , lowercase)
a__: Union[str, Any] = self.dummy_sample
a__: Optional[int] = 0.1 * sample
a__: Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a__: str = self.get_scheduler_config(**lowercase)
a__: Optional[int] = scheduler_class(**lowercase)
scheduler.set_timesteps(lowercase)
# copy over dummy past residuals
a__: Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase)
a__: Dict = scheduler_class.from_pretrained(lowercase)
new_scheduler.set_timesteps(lowercase)
# copy over dummy past residuals
a__: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
a__ , a__: Dict = sample, sample
for t in range(lowercase , time_step + scheduler.config.solver_order + 1):
a__: List[Any] = scheduler.step(lowercase , lowercase , lowercase , **lowercase).prev_sample
a__: Optional[int] = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCamelCase_ ( self , lowercase=0 , **lowercase) -> Optional[Any]:
'''simple docstring'''
a__: int = dict(self.forward_default_kwargs)
a__: List[Any] = kwargs.pop('num_inference_steps' , lowercase)
a__: List[Any] = self.dummy_sample
a__: Dict = 0.1 * sample
a__: Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a__: Dict = self.get_scheduler_config()
a__: Dict = scheduler_class(**lowercase)
scheduler.set_timesteps(lowercase)
# copy over dummy past residuals (must be after setting timesteps)
a__: List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase)
a__: Union[str, Any] = scheduler_class.from_pretrained(lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase)
# copy over dummy past residual (must be after setting timesteps)
a__: List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
a__: Optional[int] = scheduler.step(lowercase , lowercase , lowercase , **lowercase).prev_sample
a__: Any = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase_ ( self , lowercase=None , **lowercase) -> Optional[Any]:
'''simple docstring'''
if scheduler is None:
a__: Union[str, Any] = self.scheduler_classes[0]
a__: str = self.get_scheduler_config(**lowercase)
a__: Any = scheduler_class(**lowercase)
a__: Any = self.scheduler_classes[0]
a__: Union[str, Any] = self.get_scheduler_config(**lowercase)
a__: Any = scheduler_class(**lowercase)
a__: Union[str, Any] = 10
a__: Optional[Any] = self.dummy_model()
a__: Any = self.dummy_sample_deter
scheduler.set_timesteps(lowercase)
for i, t in enumerate(scheduler.timesteps):
a__: Optional[int] = model(lowercase , lowercase)
a__: List[Any] = scheduler.step(lowercase , lowercase , lowercase).prev_sample
return sample
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = dict(self.forward_default_kwargs)
a__: Optional[int] = kwargs.pop('num_inference_steps' , lowercase)
for scheduler_class in self.scheduler_classes:
a__: List[str] = self.get_scheduler_config()
a__: List[str] = scheduler_class(**lowercase)
a__: Union[str, Any] = self.dummy_sample
a__: Union[str, Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase , 'set_timesteps'):
scheduler.set_timesteps(lowercase)
elif num_inference_steps is not None and not hasattr(lowercase , 'set_timesteps'):
a__: str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a__: Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
a__: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
a__: Optional[int] = scheduler.timesteps[5]
a__: List[Any] = scheduler.timesteps[6]
a__: Optional[int] = scheduler.step(lowercase , lowercase , lowercase , **lowercase).prev_sample
a__: List[Any] = scheduler.step(lowercase , lowercase , lowercase , **lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: str = DEISMultistepScheduler(**self.get_scheduler_config())
a__: List[Any] = self.full_loop(scheduler=lowercase)
a__: str = torch.mean(torch.abs(lowercase))
assert abs(result_mean.item() - 0.23916) < 1e-3
a__: Any = DPMSolverSinglestepScheduler.from_config(scheduler.config)
a__: str = DPMSolverMultistepScheduler.from_config(scheduler.config)
a__: str = UniPCMultistepScheduler.from_config(scheduler.config)
a__: Tuple = DEISMultistepScheduler.from_config(scheduler.config)
a__: str = self.full_loop(scheduler=lowercase)
a__: List[str] = torch.mean(torch.abs(lowercase))
assert abs(result_mean.item() - 0.23916) < 1e-3
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=lowercase)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
self.check_over_configs(thresholding=lowercase)
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , algorithm_type='deis' , solver_order=lowercase , solver_type=lowercase , )
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , algorithm_type=lowercase , )
a__: str = self.full_loop(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , algorithm_type=lowercase , )
assert not torch.isnan(lowercase).any(), "Samples have nan numbers"
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
self.check_over_configs(lower_order_final=lowercase)
self.check_over_configs(lower_order_final=lowercase)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=lowercase , time_step=0)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Tuple = self.full_loop()
a__: Tuple = torch.mean(torch.abs(lowercase))
assert abs(result_mean.item() - 0.23916) < 1e-3
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Dict = self.full_loop(prediction_type='v_prediction')
a__: Any = torch.mean(torch.abs(lowercase))
assert abs(result_mean.item() - 0.091) < 1e-3
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = self.scheduler_classes[0]
a__: List[str] = self.get_scheduler_config(thresholding=lowercase , dynamic_thresholding_ratio=0)
a__: Any = scheduler_class(**lowercase)
a__: Tuple = 10
a__: Dict = self.dummy_model()
a__: Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase)
for i, t in enumerate(scheduler.timesteps):
a__: Any = model(lowercase , lowercase)
a__: Any = scheduler.step(lowercase , lowercase , lowercase).prev_sample
assert sample.dtype == torch.floataa
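if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the test suite): a bare
    # denoising loop with DEISMultistepScheduler, using zeros as a stand-in for a real
    # UNet's noise prediction.
    demo_scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
    demo_scheduler.set_timesteps(10)
    demo_sample = torch.randn(1, 3, 8, 8)
    for demo_t in demo_scheduler.timesteps:
        demo_model_output = torch.zeros_like(demo_sample)  # placeholder prediction
        demo_sample = demo_scheduler.step(demo_model_output, demo_t, demo_sample).prev_sample
    print(demo_sample.shape)  # torch.Size([1, 3, 8, 8])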
| 290 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase__ = random.Random()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
if rng is None:
a__: Any = global_rng
a__: int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __snake_case ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=4_00 , lowercase=20_00 , lowercase=1 , lowercase=0.0 , lowercase=1_60_00 , lowercase=True , lowercase=True , ) -> Union[str, Any]:
'''simple docstring'''
a__: Tuple = parent
a__: Optional[int] = batch_size
a__: Optional[Any] = min_seq_length
a__: Optional[int] = max_seq_length
a__: Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__: Dict = feature_size
a__: Any = padding_value
a__: Optional[Any] = sampling_rate
a__: Optional[Any] = return_attention_mask
a__: str = do_normalize
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase_ ( self , lowercase=False , lowercase=False) -> Tuple:
'''simple docstring'''
def _flatten(lowercase):
return list(itertools.chain(*lowercase))
if equal_length:
a__: Dict = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
a__: List[Any] = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
a__: str = [np.asarray(lowercase) for x in speech_inputs]
return speech_inputs
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = WavaVecaFeatureExtractor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[int] = WavaVecaFeatureExtractionTester(self)
def lowerCamelCase_ ( self , lowercase) -> List[Any]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowercase , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0) - 1) < 1e-3))
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
a__: Optional[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: List[str] = [np.asarray(lowercase) for speech_input in speech_inputs]
# Test not batched input
a__: Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='np').input_values
a__: Dict = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test batched
a__: Dict = feat_extract(lowercase , return_tensors='np').input_values
a__: int = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test 2-D numpy arrays are batched.
a__: int = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
a__: Union[str, Any] = np.asarray(lowercase)
a__: int = feat_extract(lowercase , return_tensors='np').input_values
a__: Any = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: List[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Optional[int] = ['longest', 'max_length', 'do_not_pad']
a__: List[Any] = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: Dict = feat_extract(lowercase , padding=lowercase , max_length=lowercase , return_tensors='np')
a__: Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self.assertTrue(input_values[0][8_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self.assertTrue(input_values[0][10_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Optional[int] = range(8_00 , 14_00 , 2_00)
a__: List[str] = [floats_list((1, x))[0] for x in lengths]
a__: Tuple = ['longest', 'max_length', 'do_not_pad']
a__: Dict = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: int = feat_extract(lowercase , max_length=lowercase , padding=lowercase)
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Any = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Dict = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='max_length' , return_tensors='np')
a__: int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: int = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: str = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='longest' , return_tensors='np')
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00))
a__: Dict = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Tuple = feat_extract(
lowercase , truncation=lowercase , max_length=20_00 , padding='longest' , return_tensors='np')
a__: str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00))
@require_torch
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
import torch
a__: Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Tuple = np.random.rand(1_00).astype(np.floataa)
a__: Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__: Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
a__: Optional[Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
@slow
@require_torch
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
a__: str = WavaVecaConfig.from_pretrained(lowercase)
a__: str = WavaVecaFeatureExtractor.from_pretrained(lowercase)
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer')
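if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the test suite): the class
    # name below is this file's renamed form of transformers' Wav2Vec2FeatureExtractor;
    # one second of random 16 kHz audio is normalized and batched.
    demo_extractor = WavaVecaFeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
    demo_audio = np.random.randn(16_000).astype(np.float32)
    demo_inputs = demo_extractor(demo_audio, sampling_rate=16_000, return_tensors='np')
    print(demo_inputs.input_values.shape)  # (1, 16000)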
| 290 | 1 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->np.array:
a__: Union[str, Any] = F'{sampling_rate}'
a__: str = '1'
a__: Dict = 'f32le'
a__: List[str] = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(_SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
a__: int = ffmpeg_process.communicate(_SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
a__: Union[str, Any] = output_stream[0]
a__: Optional[Any] = np.frombuffer(_SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "f32le" , ) ->Optional[int]:
a__: Optional[Any] = F'{sampling_rate}'
a__: str = '1'
if format_for_conversion == "s16le":
a__: List[str] = 2
elif format_for_conversion == "f32le":
a__: Optional[int] = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
a__: List[Any] = platform.system()
if system == "Linux":
a__: Tuple = 'alsa'
a__: Dict = 'default'
elif system == "Darwin":
a__: Tuple = 'avfoundation'
a__: List[str] = ':0'
elif system == "Windows":
a__: Optional[Any] = 'dshow'
a__: List[Any] = 'default'
a__: List[str] = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
a__: Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
a__: Tuple = _ffmpeg_stream(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "f32le" , ) ->Union[str, Any]:
if stream_chunk_s is not None:
a__: Union[str, Any] = stream_chunk_s
else:
a__: Tuple = chunk_length_s
a__: int = ffmpeg_microphone(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , format_for_conversion=_SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
a__: List[Any] = np.intaa
a__: Union[str, Any] = 2
elif format_for_conversion == "f32le":
a__: List[Any] = np.floataa
a__: Any = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
a__: Tuple = chunk_length_s / 6
a__: List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ):
a__: str = [stride_length_s, stride_length_s]
a__: Optional[int] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
a__: Union[str, Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
a__: List[str] = datetime.datetime.now()
a__: Optional[int] = datetime.timedelta(seconds=_SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=_SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
a__: int = np.frombuffer(item['raw'] , dtype=_SCREAMING_SNAKE_CASE )
a__: List[Any] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
a__: Union[str, Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->List[Any]:
a__: int = b''
a__ , a__: Any = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
a__: Dict = 0
for raw in iterator:
acc += raw
if stream and len(_SCREAMING_SNAKE_CASE ) < chunk_len:
a__: str = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(_SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
a__: Union[str, Any] = (_stride_left, stride_right)
a__: int = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
a__: Tuple = False
yield item
a__: List[str] = stride_left
a__: Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(_SCREAMING_SNAKE_CASE ) > stride_left:
a__: int = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
a__: Optional[int] = False
yield item
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
    a__: Union[str, Any] = 2**24 # 16 MB
try:
with subprocess.Popen(_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=_SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
a__: List[Any] = ffmpeg_process.stdout.read(_SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
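if __name__ == "__main__":
    # Usage sketch (illustration only): the first helper above corresponds to
    # `ffmpeg_read` in transformers.pipelines.audio_utils; it decodes raw file bytes
    # into a float32 waveform at the requested sampling rate. 'sample.flac' is a
    # placeholder path and a system ffmpeg install is required.
    from transformers.pipelines.audio_utils import ffmpeg_read
    with open('sample.flac', 'rb') as demo_file:
        demo_audio = ffmpeg_read(demo_file.read(), sampling_rate=16_000)
    print(demo_audio.shape, demo_audio.dtype)  # e.g. (160000,) float32 for a 10 s clip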
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __snake_case ( __lowerCAmelCase ):
a__ = """decision_transformer"""
a__ = ["""past_key_values"""]
a__ = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase=17 , lowercase=4 , lowercase=1_28 , lowercase=40_96 , lowercase=True , lowercase=1 , lowercase=10_24 , lowercase=3 , lowercase=1 , lowercase=None , lowercase="relu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=5_02_56 , lowercase=5_02_56 , lowercase=False , lowercase=False , **lowercase , ) -> Tuple:
'''simple docstring'''
a__: List[str] = state_dim
a__: int = act_dim
a__: List[Any] = hidden_size
a__: List[str] = max_ep_len
a__: List[Any] = action_tanh
a__: Optional[Any] = vocab_size
a__: Tuple = n_positions
a__: Dict = n_layer
a__: Optional[int] = n_head
a__: Optional[int] = n_inner
a__: Any = activation_function
a__: Union[str, Any] = resid_pdrop
a__: Any = embd_pdrop
a__: Any = attn_pdrop
a__: List[Any] = layer_norm_epsilon
a__: Optional[Any] = initializer_range
a__: Any = scale_attn_weights
a__: Dict = use_cache
a__: Optional[int] = scale_attn_by_inverse_layer_idx
a__: List[str] = reorder_and_upcast_attn
a__: Any = bos_token_id
a__: int = eos_token_id
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
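if __name__ == "__main__":
    # Minimal sketch (illustration only): the class above mirrors transformers'
    # DecisionTransformerConfig; the Gym Hopper sizes are an 11-dim state space and a
    # 3-dim action space.
    from transformers import DecisionTransformerConfig
    demo_config = DecisionTransformerConfig(state_dim=11, act_dim=3, hidden_size=128, n_layer=3, n_head=1)
    print(demo_config.state_dim, demo_config.num_hidden_layers)  # 11 3 (via the attribute map above)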
| 290 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase__ = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class __snake_case ( __lowerCAmelCase ):
a__ = 42
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
if latents is None:
a__: Optional[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase)
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
a__: List[Any] = latents.to(lowercase)
a__: Any = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self , lowercase=0) -> List[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`')
a__: Union[str, Any] = torch.device(f'cuda:{gpu_id}')
a__: List[str] = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase)
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
if self.device != torch.device('meta') or not hasattr(self.image_encoder , '_hf_hook'):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase , '_hf_hook')
and hasattr(module._hf_hook , 'execution_device')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , ) -> str:
'''simple docstring'''
if isinstance(lowercase , lowercase) and isinstance(image[0] , torch.Tensor):
a__: Any = torch.cat(lowercase , axis=0) if image[0].ndim == 4 else torch.stack(lowercase , axis=0)
if not isinstance(lowercase , torch.Tensor):
a__: List[Any] = self.image_processor(lowercase , return_tensors='pt').pixel_values[0].unsqueeze(0)
a__: str = image.to(dtype=self.image_encoder.dtype , device=lowercase)
a__: str = self.image_encoder(lowercase)['last_hidden_state']
a__: List[str] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
a__: List[str] = image_embeds.repeat_interleave(lowercase , dim=0)
if do_classifier_free_guidance:
a__: Optional[Any] = torch.zeros_like(lowercase)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a__: Optional[int] = torch.cat([negative_image_embeds, image_embeds])
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase)
def __call__( self , lowercase , lowercase = 1 , lowercase = 25 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 64 , lowercase = "pil" , lowercase = True , ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(lowercase , PIL.Image.Image):
a__: Any = 1
elif isinstance(lowercase , torch.Tensor):
a__: Tuple = image.shape[0]
elif isinstance(lowercase , lowercase) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)):
a__: Any = len(lowercase)
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase)}')
a__: Any = self._execution_device
a__: Optional[Any] = batch_size * num_images_per_prompt
a__: Optional[Any] = guidance_scale > 1.0
a__: Dict = self._encode_image(lowercase , lowercase , lowercase , lowercase)
# prior
self.scheduler.set_timesteps(lowercase , device=lowercase)
a__: Dict = self.scheduler.timesteps
a__: List[Any] = self.prior.config.num_embeddings
a__: List[str] = self.prior.config.embedding_dim
a__: Tuple = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
a__: List[Any] = latents.reshape(latents.shape[0] , lowercase , lowercase)
for i, t in enumerate(self.progress_bar(lowercase)):
# expand the latents if we are doing classifier free guidance
a__: Dict = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
a__: Tuple = self.scheduler.scale_model_input(lowercase , lowercase)
a__: Any = self.prior(
lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding
# remove the variance
a__ , a__: List[Any] = noise_pred.split(
scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
a__ , a__: str = noise_pred.chunk(2)
a__: Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
a__: str = self.scheduler.step(
lowercase , timestep=lowercase , sample=lowercase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase)
a__: Any = []
for i, latent in enumerate(lowercase):
print()
a__: int = self.renderer.decode(
latent[None, :] , lowercase , size=lowercase , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(lowercase)
a__: int = torch.stack(lowercase)
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}')
a__: List[str] = images.cpu().numpy()
if output_type == "pil":
a__: Optional[int] = [self.numpy_to_pil(lowercase) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook') and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase)
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
while a != 0:
a__ , a__: List[str] = b % a, a
return b
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
if gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) != 1:
a__: Dict = F'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: Union[str, Any] = 1, 0, a
a__ , a__ , a__: Any = 0, 1, m
while va != 0:
a__: int = ua // va
a__ , a__ , a__ , a__ , a__ , a__: Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
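# Worked example (illustration only): for a = 7 and m = 11, the extended Euclidean loop
# above yields 8, and indeed 7 * 8 = 56 ≡ 1 (mod 11), so 8 is the modular inverse of 7
# modulo 11. For a = 4 and m = 10, gcd(4, 10) = 2 != 1, so no inverse exists and the
# ValueError branch is taken instead.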
| 290 | 1 |
"""simple docstring"""
lowercase__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 290 | """simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase__ = logging.getLogger(__name__)
class __snake_case :
def __init__( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = False
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
if not self.initialized:
a__: Optional[int] = RagRetriever(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
a__: Optional[int] = True
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
self.retriever.index.init_index()
def lowerCamelCase_ ( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__ , a__: str = self.retriever._main_retrieve(lowercase , lowercase)
return doc_ids, retrieved_doc_embeds
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None) -> int:
'''simple docstring'''
if index is not None and index.is_initialized() and len(lowercase) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
super().__init__(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
a__: Any = retrieval_workers
if len(self.retrieval_workers) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase)
for worker in self.retrieval_workers
])
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
logger.info('initializing retrieval')
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def lowerCamelCase_ ( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
if len(self.retrieval_workers) > 0:
# Select a random retrieval actor.
a__: int = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
a__ , a__: List[Any] = ray.get(random_worker.retrieve.remote(lowercase , lowercase))
else:
a__ , a__: Dict = self._main_retrieve(lowercase , lowercase)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase)
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase=None , **lowercase) -> Tuple:
'''simple docstring'''
return super(lowercase , cls).get_tokenizers(lowercase , lowercase , **lowercase)
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase , lowercase=None , **lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[int] = kwargs.pop('config' , lowercase) or RagConfig.from_pretrained(lowercase , **lowercase)
a__: Union[str, Any] = RagTokenizer.from_pretrained(lowercase , config=lowercase)
a__: int = rag_tokenizer.question_encoder
a__: Any = rag_tokenizer.generator
if indexed_dataset is not None:
a__: List[Any] = 'custom'
a__: Optional[Any] = CustomHFIndex(config.retrieval_vector_size , lowercase)
else:
a__: Dict = cls._build_index(lowercase)
return cls(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
| 290 | 1 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ) ->Any:
a__: Union[str, Any] = None
if token is not None:
a__: Optional[int] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
# The id of a workflow (not of a workflow run)
a__: List[Any] = '636036'
a__: List[str] = F'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
return result["workflow_runs"]
def __a ( _SCREAMING_SNAKE_CASE ) ->List[Any]:
a__: List[str] = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
a__: Dict = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
a__: Any = workflow_run['id']
break
return workflow_run_id
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
a__: List[str] = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
if workflow_run_id is not None:
a__: Tuple = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
a__: Dict = artifacts_links[artifact_name]
download_artifact(
artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Optional[Any] = {}
for artifact_name in artifact_names:
a__: Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , F'{artifact_name}.zip' )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
a__: Tuple = {}
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
with z.open(_SCREAMING_SNAKE_CASE ) as f:
a__: Optional[Any] = f.read().decode('UTF-8' )
return results
| 290 | """simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
a__: int = None
if token is not None:
a__: Tuple = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Optional[Any] = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
a__: str = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: str = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
a__: int = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: Dict = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Dict:
a__: Dict = None
if token is not None:
a__: List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Dict = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: List[Any] = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
a__: Dict = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: Optional[int] = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: List[Any] = None
if token is not None:
a__: Optional[int] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = result.headers['Location']
a__: Optional[int] = requests.get(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
a__: int = os.path.join(_SCREAMING_SNAKE_CASE , F'{artifact_name}.zip' )
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fp:
fp.write(response.content )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
a__: List[Any] = []
a__: Optional[Any] = []
a__: List[Any] = None
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_SCREAMING_SNAKE_CASE ) as f:
for line in f:
a__: Optional[int] = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
a__: Union[str, Any] = line[: line.index(': ' )]
a__: Union[str, Any] = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
a__: Optional[int] = line[len('FAILED ' ) :]
failed_tests.append(_SCREAMING_SNAKE_CASE )
elif filename == "job_name.txt":
a__: Union[str, Any] = line
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
F'`errors` and `failed_tests` should have the same number of elements. Got {len(_SCREAMING_SNAKE_CASE )} for `errors` '
F'and {len(_SCREAMING_SNAKE_CASE )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
' problem.' )
a__: Tuple = None
if job_name and job_links:
a__: Dict = job_links.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# A list with elements of the form (line of error, error, failed test)
a__: int = [x + [y] + [job_link] for x, y in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return result
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->str:
a__: int = []
a__: Optional[int] = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for p in os.listdir(_SCREAMING_SNAKE_CASE ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_SCREAMING_SNAKE_CASE , job_links=_SCREAMING_SNAKE_CASE ) )
return errors
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Any:
a__: str = Counter()
counter.update([x[1] for x in logs] )
a__: int = counter.most_common()
a__: Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
a__: List[str] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    a__: Optional[Any] = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: List[str] = test.split('::' )[0]
if test.startswith('tests/models/' ):
a__: Dict = test.split('/' )[2]
else:
a__: Any = None
return test
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[str]:
a__: int = [(x[0], x[1], get_model(x[2] )) for x in logs]
a__: List[Any] = [x for x in logs if x[2] is not None]
a__: Optional[Any] = {x[2] for x in logs}
a__: Dict = {}
for test in tests:
a__: Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
a__: Union[str, Any] = counter.most_common()
a__: List[str] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
a__: List[Any] = sum(error_counts.values() )
if n_errors > 0:
a__: Any = {'count': n_errors, 'errors': error_counts}
    a__: Optional[int] = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: Any = '| no. | error | status |'
a__: Any = '|-:|:-|:-|'
a__: str = [header, sep]
for error in reduced_by_error:
a__: int = reduced_by_error[error]['count']
a__: Tuple = F'| {count} | {error[:100]} | |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
a__: List[str] = '| model | no. of errors | major error | count |'
a__: str = '|-:|-:|-:|-:|'
a__: int = [header, sep]
for model in reduced_by_model:
a__: Tuple = reduced_by_model[model]['count']
a__ , a__: Dict = list(reduced_by_model[model]['errors'].items() )[0]
a__: Dict = F'| {model} | {count} | {error[:60]} | {_count} |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowercase__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowercase__ = get_job_links(args.workflow_run_id, token=args.token)
lowercase__ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowercase__ = k.find(' / ')
lowercase__ = k[index + len(' / ') :]
lowercase__ = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowercase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowercase__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowercase__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowercase__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowercase__ = reduce_by_error(errors)
lowercase__ = reduce_by_model(errors)
lowercase__ = make_github_table(reduced_by_error)
lowercase__ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 290 | 1 |
"""simple docstring"""
from math import factorial
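# Digit sum of n! (n defaults to 100); e.g. 10! = 3628800, whose digits sum to 27.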
def __a ( _SCREAMING_SNAKE_CASE = 100 ) ->int:
    return sum(map(int , str(factorial(_SCREAMING_SNAKE_CASE ) ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 290 | """simple docstring"""
import math
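# Trial-division primality test using the 6k +/- 1 optimisation: after 2 and 3, every prime is of the form 6k +/- 1.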
def __a ( _SCREAMING_SNAKE_CASE ) ->bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
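# Number-spiral diagonals (cf. Project Euler 58): grow the spiral two rings at a time until the share of primes on the diagonals drops below the given ratio.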
def __a ( _SCREAMING_SNAKE_CASE = 0.1 ) ->int:
a__: str = 3
a__: Optional[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_SCREAMING_SNAKE_CASE )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | 1 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def __a ( _SCREAMING_SNAKE_CASE ) ->int:
a__: Any = VideoMAEConfig()
set_architecture_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if "finetuned" not in model_name:
a__: Optional[Any] = False
if "finetuned" in model_name:
a__: List[Any] = 'huggingface/label-files'
if "kinetics" in model_name:
a__: Tuple = 400
a__: List[str] = 'kinetics400-id2label.json'
elif "ssv2" in model_name:
a__: Optional[int] = 174
a__: Optional[Any] = 'something-something-v2-id2label.json'
else:
raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
a__: List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
a__: List[str] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
a__: str = idalabel
a__: int = {v: k for k, v in idalabel.items()}
return config
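# Fill in encoder/decoder widths, depths and head counts for the "small", "large" and "huge" VideoMAE variants; "base" keeps the config defaults.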
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
if "small" in model_name:
a__: List[Any] = 384
a__: int = 1536
a__: Union[str, Any] = 12
a__: Tuple = 16
a__: Optional[Any] = 12
a__: List[Any] = 3
a__: List[str] = 192
a__: Optional[Any] = 768
elif "large" in model_name:
a__: Optional[Any] = 1024
a__: Any = 4096
a__: int = 24
a__: int = 16
a__: Dict = 12
a__: Optional[int] = 8
a__: Union[str, Any] = 512
a__: Any = 2048
elif "huge" in model_name:
a__: Any = 1280
a__: Dict = 5120
a__: Optional[Any] = 32
a__: List[Any] = 16
a__: Optional[int] = 12
a__: Dict = 8
a__: Any = 640
a__: Union[str, Any] = 2560
elif "base" not in model_name:
raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
if "encoder." in name:
a__: Union[str, Any] = name.replace('encoder.' , '' )
if "cls_token" in name:
a__: int = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
a__: int = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
a__: List[str] = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
a__: Dict = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a__: Optional[int] = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
a__: Optional[int] = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
a__: Optional[int] = name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
a__: Dict = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
a__: List[Any] = name.replace('attn' , 'attention.self' )
if "attn" in name:
a__: Dict = name.replace('attn' , 'attention.attention' )
if "norm1" in name:
a__: Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a__: Union[str, Any] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a__: List[Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a__: Optional[Any] = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
a__: Dict = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
a__: Union[str, Any] = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
a__: List[str] = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
a__: Any = name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
a__: Optional[Any] = name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
a__: Any = name.replace('head' , 'classifier' )
return name
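# Rewrite the original state dict: rename every key and split fused qkv projection weights into separate query/key/value slices.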
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
for key in orig_state_dict.copy().keys():
a__: Any = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if key.startswith('encoder.' ):
a__: List[Any] = key.replace('encoder.' , '' )
if "qkv" in key:
a__: str = key.split('.' )
if key.startswith('decoder.blocks' ):
a__: List[str] = config.decoder_hidden_size
a__: Optional[Any] = int(key_split[2] )
a__: Optional[int] = 'decoder.decoder_layers.'
if "weight" in key:
a__: List[str] = val[:dim, :]
a__: Union[str, Any] = val[dim : dim * 2, :]
a__: int = val[-dim:, :]
else:
a__: str = config.hidden_size
a__: List[str] = int(key_split[1] )
a__: Tuple = 'videomae.encoder.layer.'
if "weight" in key:
a__: Optional[int] = val[:dim, :]
a__: Tuple = val[dim : dim * 2, :]
a__: Tuple = val[-dim:, :]
else:
a__: Any = val
return orig_state_dict
def __a ( ) ->Any:
a__: List[str] = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
a__: Optional[Any] = np.load(_SCREAMING_SNAKE_CASE )
return list(_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
a__: Tuple = get_videomae_config(_SCREAMING_SNAKE_CASE )
if "finetuned" in model_name:
a__: Union[str, Any] = VideoMAEForVideoClassification(_SCREAMING_SNAKE_CASE )
else:
a__: Optional[Any] = VideoMAEForPreTraining(_SCREAMING_SNAKE_CASE )
# download original checkpoint, hosted on Google Drive
a__: Any = 'pytorch_model.bin'
gdown.cached_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , quiet=_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
if "model" in files:
a__: List[str] = files['model']
else:
a__: List[str] = files['module']
a__: List[str] = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
# verify model on basic input
a__: Union[str, Any] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
a__: Tuple = prepare_video()
a__: Dict = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' )
if "finetuned" not in model_name:
a__: List[Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
a__: Any = torch.load(_SCREAMING_SNAKE_CASE )
a__: Union[str, Any] = model(**_SCREAMING_SNAKE_CASE )
a__: Dict = outputs.logits
a__: Dict = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
a__: Dict = torch.Size([1, 400] )
a__: List[str] = torch.tensor([-0.9_291, -0.4_061, -0.9_307] )
elif model_name == "videomae-small-finetuned-ssv2":
a__: Optional[int] = torch.Size([1, 174] )
a__: List[Any] = torch.tensor([0.2_671, -0.4_689, -0.8_235] )
elif model_name == "videomae-base":
a__: Tuple = torch.Size([1, 1408, 1536] )
a__: List[str] = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]] )
elif model_name == "videomae-base-short":
a__: Optional[int] = torch.Size([1, 1408, 1536] )
a__: Union[str, Any] = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] )
# we verified the loss both for normalized and unnormalized targets for this one
a__: Optional[Any] = torch.tensor([0.5_142] ) if config.norm_pix_loss else torch.tensor([0.6_469] )
elif model_name == "videomae-large":
a__: Optional[Any] = torch.Size([1, 1408, 1536] )
a__: List[Any] = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]] )
elif model_name == "videomae-large-finetuned-kinetics":
a__: Dict = torch.Size([1, 400] )
a__: Union[str, Any] = torch.tensor([0.0_771, 0.0_011, -0.3_625] )
elif model_name == "videomae-huge-finetuned-kinetics":
a__: Tuple = torch.Size([1, 400] )
a__: Optional[int] = torch.tensor([0.2_433, 0.1_632, -0.4_894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
a__: Tuple = torch.Size([1, 400] )
a__: Dict = torch.tensor([0.6_588, 0.0_990, -0.2_493] )
elif model_name == "videomae-base-finetuned-kinetics":
a__: List[str] = torch.Size([1, 400] )
a__: Union[str, Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] )
elif model_name == "videomae-base-short-ssv2":
a__: List[str] = torch.Size([1, 1408, 1536] )
a__: List[Any] = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
a__: str = torch.Size([1, 174] )
a__: Optional[int] = torch.tensor([-0.0_537, -0.1_539, -0.3_266] )
elif model_name == "videomae-base-ssv2":
a__: str = torch.Size([1, 1408, 1536] )
a__: int = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]] )
elif model_name == "videomae-base-finetuned-ssv2":
a__: Optional[int] = torch.Size([1, 174] )
a__: Optional[int] = torch.tensor([0.1_961, -0.8_337, -0.6_389] )
else:
raise ValueError(F'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
a__: int = outputs.loss
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(_SCREAMING_SNAKE_CASE , organization='nielsr' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowercase__ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 290 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase__ = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 290 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = TextToVideoSDPipeline
a__ = TEXT_TO_IMAGE_PARAMS
a__ = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a__ = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
torch.manual_seed(0)
a__: Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
a__: List[str] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0)
a__: Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0)
a__: Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
a__: Dict = CLIPTextModel(lowercase)
a__: List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
a__: int = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def lowerCamelCase_ ( self , lowercase , lowercase=0) -> Dict:
'''simple docstring'''
if str(lowercase).startswith('mps'):
a__: List[str] = torch.manual_seed(lowercase)
else:
a__: Dict = torch.Generator(device=lowercase).manual_seed(lowercase)
a__: Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__: int = self.get_dummy_components()
a__: Union[str, Any] = TextToVideoSDPipeline(**lowercase)
a__: Dict = sd_pipe.to(lowercase)
sd_pipe.set_progress_bar_config(disable=lowercase)
a__: Optional[int] = self.get_dummy_inputs(lowercase)
a__: Dict = 'np'
a__: Union[str, Any] = sd_pipe(**lowercase).frames
a__: Optional[int] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
a__: Optional[Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase , expected_max_diff=3e-3)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase , expected_max_diff=1e-2)
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.')
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
pass
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy')
a__: Optional[Any] = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
a__: int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
a__: Optional[Any] = pipe.to('cuda')
a__: str = 'Spiderman is surfing'
a__: str = torch.Generator(device='cpu').manual_seed(0)
a__: str = pipe(lowercase , generator=lowercase , num_inference_steps=25 , output_type='pt').frames
a__: List[Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy')
a__: int = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
a__: Union[str, Any] = pipe.to('cuda')
a__: List[Any] = 'Spiderman is surfing'
a__: Tuple = torch.Generator(device='cpu').manual_seed(0)
a__: Optional[int] = pipe(lowercase , generator=lowercase , num_inference_steps=2 , output_type='pt').frames
a__: str = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
| 290 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = KandinskyInpaintPipeline
a__ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
a__ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
a__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a__ = False
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
return 1_00
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[int] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
torch.manual_seed(0)
a__: Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
a__: Optional[Any] = MultilingualCLIP(lowercase)
a__: int = text_encoder.eval()
return text_encoder
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
a__: str = UNetaDConditionModel(**lowercase)
return model
@property
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = VQModel(**self.dummy_movq_kwargs)
return model
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Dict = self.dummy_text_encoder
a__: int = self.dummy_tokenizer
a__: str = self.dummy_unet
a__: Any = self.dummy_movq
a__: Tuple = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase , )
a__: Tuple = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase_ ( self , lowercase , lowercase=0) -> Any:
'''simple docstring'''
a__: List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase)).to(lowercase)
a__: int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowercase)
# create init_image
a__: Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase)).to(lowercase)
a__: int = image.cpu().permute(0 , 2 , 3 , 1)[0]
a__: Optional[int] = Image.fromarray(np.uinta(lowercase)).convert('RGB').resize((2_56, 2_56))
# create mask
a__: Tuple = np.ones((64, 64) , dtype=np.floataa)
a__: Optional[Any] = 0
if str(lowercase).startswith('mps'):
a__: str = torch.manual_seed(lowercase)
else:
a__: Dict = torch.Generator(device=lowercase).manual_seed(lowercase)
a__: Optional[int] = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = 'cpu'
a__: List[Any] = self.get_dummy_components()
a__: Optional[Any] = self.pipeline_class(**lowercase)
a__: str = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Optional[int] = pipe(**self.get_dummy_inputs(lowercase))
a__: List[str] = output.images
a__: int = pipe(
**self.get_dummy_inputs(lowercase) , return_dict=lowercase , )[0]
a__: Optional[Any] = image[0, -3:, -3:, -1]
a__: List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}')
assert image.shape == (1, 64, 64, 3)
a__: str = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy')
a__: int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
a__: Union[str, Any] = np.ones((7_68, 7_68) , dtype=np.floataa)
a__: int = 0
a__: Optional[int] = 'a hat'
a__: int = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa)
pipe_prior.to(lowercase)
a__: Any = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa)
a__: Optional[Any] = pipeline.to(lowercase)
pipeline.set_progress_bar_config(disable=lowercase)
a__: Dict = torch.Generator(device='cpu').manual_seed(0)
a__ , a__: Optional[Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
a__: List[str] = pipeline(
lowercase , image=lowercase , mask_image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
a__: str = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase , lowercase)
| 290 | 1 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowercase__ = logging.get_logger(__name__)
def __a ( _SCREAMING_SNAKE_CASE ) ->List[str]:
a__: Tuple = r'\w+[.]\d+'
a__: Optional[Any] = re.findall(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for pat in pats:
a__: str = key.replace(_SCREAMING_SNAKE_CASE , '_'.join(pat.split('.' ) ) )
return key
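# Rename one PyTorch weight to its Flax counterpart and adjust its layout where needed (conv kernels transposed, linear weights transposed, norm/embedding names remapped).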
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: Dict = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
a__: Dict = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
a__: Union[str, Any] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
a__: Tuple = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
a__: Tuple = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
a__: List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
a__: int = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
a__: str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
a__: str = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
a__: Optional[int] = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
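# Convert a full PyTorch state dict to Flax params: initialise random Flax weights for reference, rename/reshape each tensor and check that the shapes match.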
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=42 ) ->List[str]:
# Step 1: Convert pytorch tensor to numpy
a__: List[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
a__: Dict = flax_model.init_weights(PRNGKey(_SCREAMING_SNAKE_CASE ) )
a__: Any = flatten_dict(_SCREAMING_SNAKE_CASE )
a__: str = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
a__: Optional[int] = rename_key(_SCREAMING_SNAKE_CASE )
a__: Optional[int] = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
a__ , a__: str = rename_key_and_reshape_tensor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# also add unexpected weight so that warning is thrown
a__: Optional[int] = jnp.asarray(_SCREAMING_SNAKE_CASE )
return unflatten_dict(_SCREAMING_SNAKE_CASE )
| 290 | """simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowercase__ = logging.get_logger('transformers.models.encodec')
lowercase__ = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
lowercase__ = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
lowercase__ = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
lowercase__ = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
lowercase__ = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
lowercase__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowercase__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowercase__ = []
lowercase__ = []
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
for attribute in key.split('.' ):
a__: str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
a__: List[str] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
a__: Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
a__: str = value
elif weight_type == "weight_g":
a__: int = value
elif weight_type == "weight_v":
a__: Tuple = value
elif weight_type == "bias":
a__: Dict = value
elif weight_type == "running_mean":
a__: Any = value
elif weight_type == "running_var":
a__: Tuple = value
elif weight_type == "num_batches_tracked":
a__: List[str] = value
elif weight_type == "weight_ih_l0":
a__: List[Any] = value
elif weight_type == "weight_hh_l0":
a__: List[Any] = value
elif weight_type == "bias_ih_l0":
a__: List[Any] = value
elif weight_type == "bias_hh_l0":
a__: List[Any] = value
elif weight_type == "weight_ih_l1":
a__: int = value
elif weight_type == "weight_hh_l1":
a__: str = value
elif weight_type == "bias_ih_l1":
a__: Union[str, Any] = value
elif weight_type == "bias_hh_l1":
a__: Any = value
else:
a__: Union[str, Any] = value
logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
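# Return True when a weight name matches one of the ignore patterns ('prefix.*' prefixes, '.*.'-style infixes, or plain substrings).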
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
a__ , a__: Optional[Any] = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
a__: List[Any] = []
if model_name == "encodec_24khz" or "encodec_32khz":
a__: Optional[int] = MAPPING_24K
elif model_name == "encodec_48khz":
a__: List[Any] = MAPPING_48K
else:
raise ValueError(F'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info(F'{name} was ignored' )
continue
a__: int = False
for key, mapped_key in MAPPING.items():
if "*" in key:
a__ , a__: str = key.split('.*.' )
if prefix in name and suffix in name:
a__: List[str] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
a__: List[str] = True
if "*" in mapped_key:
a__: List[str] = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
a__: str = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
a__: int = 'weight_g'
elif "weight_v" in name:
a__: Dict = 'weight_v'
elif "weight_ih_l0" in name:
a__: int = 'weight_ih_l0'
elif "weight_hh_l0" in name:
a__: Union[str, Any] = 'weight_hh_l0'
elif "bias_ih_l0" in name:
a__: Optional[Any] = 'bias_ih_l0'
elif "bias_hh_l0" in name:
a__: Optional[int] = 'bias_hh_l0'
elif "weight_ih_l1" in name:
a__: Dict = 'weight_ih_l1'
elif "weight_hh_l1" in name:
a__: Optional[Any] = 'weight_hh_l1'
elif "bias_ih_l1" in name:
a__: List[str] = 'bias_ih_l1'
elif "bias_hh_l1" in name:
a__: Optional[Any] = 'bias_hh_l1'
elif "bias" in name:
a__: List[str] = 'bias'
elif "weight" in name:
a__: Any = 'weight'
elif "running_mean" in name:
a__: Dict = 'running_mean'
elif "running_var" in name:
a__: Dict = 'running_var'
elif "num_batches_tracked" in name:
a__: Dict = 'num_batches_tracked'
else:
a__: List[str] = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F'Unused weights: {unused_weights}' )
@torch.no_grad()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) ->int:
if config_path is not None:
a__: Dict = EncodecConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
a__: Tuple = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
a__: Any = [8, 5, 4, 4]
a__: List[str] = [2.2]
a__: List[Any] = 64
a__: Dict = 32000
a__: Union[str, Any] = 2048
a__: Union[str, Any] = False
a__: Any = False
a__: Optional[Any] = False
elif model_name == "encodec_48khz":
a__: Optional[int] = [8, 5, 4, 2]
a__: Union[str, Any] = [3.0, 6.0, 12.0, 24.0]
a__: List[str] = 48000
a__: Tuple = 2
a__: Optional[Any] = False
a__: Optional[int] = 'time_group_norm'
a__: Union[str, Any] = True
a__: Dict = 1.0
a__: str = 0.01
else:
raise ValueError(F'Unknown model name: {model_name}' )
a__: Optional[int] = EncodecModel(_SCREAMING_SNAKE_CASE )
a__: List[str] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
a__: int = torch.load(_SCREAMING_SNAKE_CASE )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
a__: str = original_checkpoint['best_state']
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
lowercase__ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 290 | 1 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def __a ( _SCREAMING_SNAKE_CASE ) ->List[str]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x2_0000 and cp <= 0x2_A6DF) #
or (cp >= 0x2_A700 and cp <= 0x2_B73F) #
or (cp >= 0x2_B740 and cp <= 0x2_B81F) #
or (cp >= 0x2_B820 and cp <= 0x2_CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2_F800 and cp <= 0x2_FA1F) #
): #
return True
return False
def __a ( _SCREAMING_SNAKE_CASE ) ->int:
# word like '180' or '身高' or '神'
for char in word:
a__: Any = ord(_SCREAMING_SNAKE_CASE )
if not _is_chinese_char(_SCREAMING_SNAKE_CASE ):
return 0
return 1
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: Tuple = set()
for token in tokens:
a__: List[Any] = len(_SCREAMING_SNAKE_CASE ) > 1 and is_chinese(_SCREAMING_SNAKE_CASE )
if chinese_word:
word_set.add(_SCREAMING_SNAKE_CASE )
a__: List[str] = list(_SCREAMING_SNAKE_CASE )
return word_list
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
if not chinese_word_set:
return bert_tokens
a__: Tuple = max([len(_SCREAMING_SNAKE_CASE ) for w in chinese_word_set] )
a__: List[str] = bert_tokens
a__ , a__: Optional[int] = 0, len(_SCREAMING_SNAKE_CASE )
while start < end:
a__: Optional[Any] = True
if is_chinese(bert_word[start] ):
a__: Tuple = min(end - start , _SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE , 1 , -1 ):
a__: int = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
a__: List[Any] = '##' + bert_word[j]
a__: List[Any] = start + i
a__: List[str] = False
break
if single_word:
start += 1
return bert_word
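# Build whole-word-masking references: LTP segments each line into Chinese words, then BERT sub-tokens that continue such a word are prefixed with '##' and their positions are recorded as ref ids.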
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
a__: str = []
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , 100 ):
a__: Dict = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['cws'] ).cws
a__: str = [get_chinese_word(_SCREAMING_SNAKE_CASE ) for r in res]
ltp_res.extend(_SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
a__: Dict = []
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , 100 ):
a__: int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
a__: Tuple = []
for input_ids, chinese_word in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: int = []
for id in input_ids:
a__: Union[str, Any] = bert_tokenizer._convert_id_to_token(_SCREAMING_SNAKE_CASE )
input_tokens.append(_SCREAMING_SNAKE_CASE )
a__: List[Any] = add_sub_symbol(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: List[str] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_SCREAMING_SNAKE_CASE ):
if token[:2] == "##":
a__: Any = token[2:]
# save chinese tokens' pos
if len(_SCREAMING_SNAKE_CASE ) == 1 and _is_chinese_char(ord(_SCREAMING_SNAKE_CASE ) ):
ref_id.append(_SCREAMING_SNAKE_CASE )
ref_ids.append(_SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
return ref_ids
def __a ( _SCREAMING_SNAKE_CASE ) ->Tuple:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
a__: Optional[Any] = f.readlines()
a__: Tuple = [line.strip() for line in data if len(_SCREAMING_SNAKE_CASE ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
a__: Union[str, Any] = LTP(args.ltp ) # faster in GPU device
a__: Tuple = BertTokenizer.from_pretrained(args.bert )
a__: List[str] = prepare_ref(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
a__: Optional[int] = [json.dumps(_SCREAMING_SNAKE_CASE ) + '\n' for ref in ref_ids]
f.writelines(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
lowercase__ = parser.parse_args()
main(args)
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
if height >= 1:
move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
move_disk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
print('moving disk from' , _SCREAMING_SNAKE_CASE , 'to' , _SCREAMING_SNAKE_CASE )
def __a ( ) ->List[str]:
a__: Dict = int(input('Height of hanoi: ' ).strip() )
move_tower(_SCREAMING_SNAKE_CASE , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
| 290 | 1 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase__ = logging.getLogger()
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
a__: Union[str, Any] = {}
a__: List[str] = os.path.join(_SCREAMING_SNAKE_CASE , 'all_results.json' )
if os.path.exists(_SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE , 'r' ) as f:
a__: List[Any] = json.load(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(F'can\'t find {path}' )
return results
lowercase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __snake_case ( __lowerCAmelCase ):
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
import xla_spawn
a__: Optional[Any] = self.get_auto_remove_tmp_dir()
a__: Optional[Any] = f'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(lowercase , 'argv' , lowercase):
a__: Any = time()
xla_spawn.main()
a__: Union[str, Any] = time()
a__: Tuple = get_results(lowercase)
self.assertGreaterEqual(result['eval_accuracy'] , 0.75)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_00)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
import xla_spawn
a__: Tuple = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(lowercase , 'argv' , lowercase):
xla_spawn.main()
| 290 | """simple docstring"""
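# Convert a snake_case string to camelCase, or to PascalCase when use_pascal is True.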
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->str:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: Optional[int] = F'Expected string as input, found {type(_SCREAMING_SNAKE_CASE )}'
raise ValueError(_SCREAMING_SNAKE_CASE )
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: List[str] = F'Expected boolean as use_pascal parameter, found {type(_SCREAMING_SNAKE_CASE )}'
raise ValueError(_SCREAMING_SNAKE_CASE )
a__: int = input_str.split('_' )
a__: List[str] = 0 if use_pascal else 1
a__: List[str] = words[start_index:]
a__: List[str] = [word[0].upper() + word[1:] for word in words_to_capitalize]
a__: List[str] = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 290 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=lowercase , )
assert hasattr(self , 'env')
def lowerCamelCase_ ( self , lowercase=1) -> Union[str, Any]:
'''simple docstring'''
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-single' , instance_count=lowercase , instance_type=self.instance_type , debugger_hook_config=lowercase , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
TrainingJobAnalytics(lowercase).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv')
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Tuple = self.create_estimator()
# run training
estimator.fit()
# result dataframe
a__: int = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
a__: Dict = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
a__: Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
a__: Any = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' , 99_99_99)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , 'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , lowercase)
| 290 | """simple docstring"""
class __snake_case :
def __init__( self , lowercase , lowercase=None , lowercase=None) -> List[str]:
'''simple docstring'''
a__: Dict = data
a__: List[Any] = previous
a__: Any = next_node
def __str__( self) -> str:
'''simple docstring'''
return f'{self.data}'
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return self.data
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return self.next
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return self.previous
class __snake_case :
def __init__( self , lowercase) -> Dict:
'''simple docstring'''
a__: List[Any] = head
def __iter__( self) -> List[Any]:
'''simple docstring'''
return self
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
if not self.current:
raise StopIteration
else:
a__: Dict = self.current.get_data()
a__: Optional[Any] = self.current.get_next()
return value
class __snake_case :
def __init__( self) -> Dict:
'''simple docstring'''
a__: List[Any] = None # First node in list
a__: Optional[int] = None # Last node in list
def __str__( self) -> Optional[Any]:
'''simple docstring'''
a__: Dict = self.head
a__: Optional[Any] = []
while current is not None:
nodes.append(current.get_data())
a__: str = current.get_next()
return " ".join(str(lowercase) for node in nodes)
def __contains__( self , lowercase) -> Optional[int]:
'''simple docstring'''
a__: Optional[int] = self.head
while current:
if current.get_data() == value:
return True
a__: Dict = current.get_next()
return False
def __iter__( self) -> int:
'''simple docstring'''
return LinkedListIterator(self.head)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
if self.head is None:
a__: Optional[Any] = node
a__: Optional[Any] = node
else:
self.insert_before_node(self.head , lowercase)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
if self.head is None:
self.set_head(lowercase)
else:
self.insert_after_node(self.tail , lowercase)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: Tuple = Node(lowercase)
if self.head is None:
self.set_head(lowercase)
else:
self.set_tail(lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Union[str, Any] = node
a__: Optional[Any] = node.previous
if node.get_previous() is None:
a__: Tuple = node_to_insert
else:
a__: int = node_to_insert
a__: Optional[int] = node_to_insert
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Optional[int] = node
a__: Tuple = node.next
if node.get_next() is None:
a__: Optional[int] = node_to_insert
else:
a__: Any = node_to_insert
a__: str = node_to_insert
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Any = 1
a__: Tuple = Node(lowercase)
a__: Tuple = self.head
while node:
if current_position == position:
self.insert_before_node(lowercase , lowercase)
return
current_position += 1
a__: List[Any] = node.next
self.insert_after_node(self.tail , lowercase)
def lowerCamelCase_ ( self , lowercase) -> Node:
'''simple docstring'''
a__: Tuple = self.head
while node:
if node.get_data() == item:
return node
a__: List[str] = node.get_next()
raise Exception('Node not found')
def lowerCamelCase_ ( self , lowercase) -> Any:
'''simple docstring'''
if (node := self.get_node(lowercase)) is not None:
if node == self.head:
a__: Any = self.head.get_next()
if node == self.tail:
a__: List[Any] = self.tail.get_previous()
self.remove_node_pointers(lowercase)
@staticmethod
def lowerCamelCase_ ( lowercase) -> None:
'''simple docstring'''
if node.get_next():
a__: Any = node.previous
if node.get_previous():
a__: List[str] = node.next
a__: int = None
a__: Union[str, Any] = None
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return self.head is None
def __a ( ) ->None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | 1 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {'vocab_file': 'spiece.model'}
lowercase__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
lowercase__ = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
class __snake_case ( __lowerCAmelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["""input_ids""", """attention_mask"""]
a__ = []
def __init__( self , lowercase , lowercase="<unk>" , lowercase="<s>" , lowercase="</s>" , lowercase="<pad>" , lowercase="[SEP]" , lowercase="[MASK]" , lowercase="[CLS]" , lowercase = None , **lowercase , ) -> None:
'''simple docstring'''
a__: str = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else bos_token
a__: Optional[Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else eos_token
a__: int = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else unk_token
a__: List[str] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else pad_token
a__: int = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else cls_token
a__: Tuple = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else sep_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
a__: int = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else mask_token
a__: Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , sep_token=lowercase , mask_token=lowercase , cls_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
a__: List[str] = vocab_file
a__: Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowercase)
@property
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
return self.sp_model.get_piece_size()
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Union[str, Any] = {self.convert_ids_to_tokens(lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = self.__dict__.copy()
a__: str = None
return state
def __setstate__( self , lowercase) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
a__: int = {}
a__: List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowercase , out_type=lowercase)
def lowerCamelCase_ ( self , lowercase) -> int:
'''simple docstring'''
return self.sp_model.piece_to_id(lowercase)
def lowerCamelCase_ ( self , lowercase) -> str:
'''simple docstring'''
a__: List[str] = self.sp_model.IdToPiece(lowercase)
return token
def lowerCamelCase_ ( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: str = []
a__: Dict = ''
a__: List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase) + token
a__: Union[str, Any] = True
a__: int = []
else:
current_sub_tokens.append(lowercase)
a__: int = False
out_string += self.sp_model.decode(lowercase)
return out_string.strip()
def lowerCamelCase_ ( self , lowercase , lowercase = False , lowercase = None , lowercase = True , **lowercase , ) -> str:
'''simple docstring'''
a__: Tuple = kwargs.pop('use_source_tokenizer' , lowercase)
a__: Optional[int] = self.convert_ids_to_tokens(lowercase , skip_special_tokens=lowercase)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
a__: str = []
a__: str = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowercase))
a__: Optional[Any] = []
sub_texts.append(lowercase)
else:
current_sub_text.append(lowercase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowercase))
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
a__: int = re.sub(r' (\[(MASK|SEP)\])' , r'\1' , ' '.join(lowercase))
else:
a__: Optional[int] = ''.join(lowercase)
a__: Dict = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
a__: str = self.clean_up_tokenization(lowercase)
return clean_text
else:
return text
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowercase):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
a__: List[str] = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase)
elif not os.path.isfile(self.vocab_file):
with open(lowercase , 'wb') as fi:
a__: str = self.sp_model.serialized_model_proto()
fi.write(lowercase)
return (out_vocab_file,)
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a__: Any = [self.cls_token_id]
a__: List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self , lowercase , lowercase = None , lowercase = False) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase)
if token_ids_a is None:
return [1] + ([0] * len(lowercase)) + [1]
return [1] + ([0] * len(lowercase)) + [1] + ([0] * len(lowercase)) + [1]
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__: Union[str, Any] = [self.sep_token_id]
a__: Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
| 290 | """simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __snake_case ( __lowerCAmelCase ):
a__ = 42
a__ = jnp.floataa
a__ = True
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
super().setup()
a__: int = nn.Dense(5 , dtype=self.dtype)
def __call__( self , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
a__: Dict = super().__call__(*lowercase , **lowercase)
a__: str = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class __snake_case ( __lowerCAmelCase ):
a__ = FlaxBigBirdForNaturalQuestionsModule
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
def cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
a__: Any = logits.shape[-1]
a__: List[Any] = (labels[..., None] == jnp.arange(_SCREAMING_SNAKE_CASE )[None]).astype('f4' )
a__: List[str] = jax.nn.log_softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
a__: Dict = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
a__: str = reduction(_SCREAMING_SNAKE_CASE )
return loss
a__: Tuple = partial(_SCREAMING_SNAKE_CASE , reduction=jnp.mean )
a__: List[str] = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Union[str, Any] = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Any = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __snake_case :
a__ = "google/bigbird-roberta-base"
a__ = 3000
a__ = 1_0500
a__ = 128
a__ = 3
a__ = 1
a__ = 5
# tx_args
a__ = 3e-5
a__ = 0.0
a__ = 2_0000
a__ = 0.0095
a__ = "bigbird-roberta-natural-questions"
a__ = "training-expt"
a__ = "data/nq-training.jsonl"
a__ = "data/nq-validation.jsonl"
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=lowercase)
a__: str = os.path.join(self.base_dir , self.save_dir)
a__: List[str] = self.batch_size_per_device * jax.device_count()
@dataclass
class __snake_case :
a__ = 42
a__ = 4096 # no dynamic padding on TPUs
def __call__( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: int = self.collate_fn(lowercase)
a__: Optional[int] = jax.tree_util.tree_map(lowercase , lowercase)
return batch
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__ , a__: Dict = self.fetch_inputs(features['input_ids'])
a__: List[Any] = {
'input_ids': jnp.array(lowercase , dtype=jnp.intaa),
'attention_mask': jnp.array(lowercase , dtype=jnp.intaa),
'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa),
'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa),
'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa),
}
return batch
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
a__: List[Any] = [self._fetch_inputs(lowercase) for ids in input_ids]
return zip(*lowercase)
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__: Union[str, Any] = [1 for _ in range(len(lowercase))]
while len(lowercase) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
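    # Yield the dataset in consecutive batches of `batch_size` (the trailing partial batch is dropped); a seed triggers an upfront shuffle.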
if seed is not None:
a__: int = dataset.shuffle(seed=_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) // batch_size ):
a__: Union[str, Any] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_SCREAMING_SNAKE_CASE )
@partial(jax.pmap , axis_name='batch' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) ->Any:
def loss_fn(_SCREAMING_SNAKE_CASE ):
a__: str = model_inputs.pop('start_labels' )
a__: Dict = model_inputs.pop('end_labels' )
a__: Optional[int] = model_inputs.pop('pooled_labels' )
a__: Optional[Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , dropout_rng=_SCREAMING_SNAKE_CASE , train=_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: Optional[int] = outputs
return state.loss_fn(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
a__ , a__: Union[str, Any] = jax.random.split(_SCREAMING_SNAKE_CASE )
a__: List[Any] = jax.value_and_grad(_SCREAMING_SNAKE_CASE )
a__ , a__: str = grad_fn(state.params )
a__: Optional[int] = jax.lax.pmean({'loss': loss} , axis_name='batch' )
a__: int = jax.lax.pmean(_SCREAMING_SNAKE_CASE , 'batch' )
a__: Union[str, Any] = state.apply_gradients(grads=_SCREAMING_SNAKE_CASE )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch' )
def __a ( _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) ->Optional[Any]:
a__: Optional[int] = model_inputs.pop('start_labels' )
a__: int = model_inputs.pop('end_labels' )
a__: Dict = model_inputs.pop('pooled_labels' )
a__: Union[str, Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=state.params , train=_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: int = outputs
a__: Optional[int] = state.loss_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Tuple = jax.lax.pmean({'loss': loss} , axis_name='batch' )
return metrics
class __snake_case ( train_state.TrainState ):
a__ = struct.field(pytree_node=__lowerCAmelCase )
@dataclass
class __snake_case :
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = None
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None) -> Optional[int]:
'''simple docstring'''
a__: Dict = model.params
a__: Any = TrainState.create(
apply_fn=model.__call__ , params=lowercase , tx=lowercase , loss_fn=lowercase , )
if ckpt_dir is not None:
a__ , a__ , a__ , a__ , a__: Any = restore_checkpoint(lowercase , lowercase)
a__: Any = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
a__ , a__: str = build_tx(**lowercase)
a__: Optional[Any] = train_state.TrainState(
step=lowercase , apply_fn=model.__call__ , params=lowercase , tx=lowercase , opt_state=lowercase , )
a__: int = args
a__: Union[str, Any] = data_collator
a__: Any = lr
a__: Dict = params
a__: Tuple = jax_utils.replicate(lowercase)
return state
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__: int = self.args
a__: str = len(lowercase) // args.batch_size
a__: Tuple = jax.random.PRNGKey(0)
a__: List[Any] = jax.random.split(lowercase , jax.device_count())
for epoch in range(args.max_epochs):
a__: str = jnp.array(0 , dtype=jnp.floataa)
a__: Tuple = get_batched_dataset(lowercase , args.batch_size , seed=lowercase)
a__: Optional[int] = 0
for batch in tqdm(lowercase , total=lowercase , desc=f'Running EPOCH-{epoch}'):
a__: List[str] = self.data_collator(lowercase)
a__ , a__ , a__: int = self.train_step_fn(lowercase , lowercase , **lowercase)
running_loss += jax_utils.unreplicate(metrics['loss'])
i += 1
if i % args.logging_steps == 0:
a__: List[Any] = jax_utils.unreplicate(state.step)
a__: Tuple = running_loss.item() / i
a__: Optional[Any] = self.scheduler_fn(state_step - 1)
a__: List[Any] = self.evaluate(lowercase , lowercase)
a__: List[str] = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(lowercase))
self.logger.log(lowercase , commit=lowercase)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__: Tuple = get_batched_dataset(lowercase , self.args.batch_size)
a__: Dict = len(lowercase) // self.args.batch_size
a__: Tuple = jnp.array(0 , dtype=jnp.floataa)
a__: List[Any] = 0
for batch in tqdm(lowercase , total=lowercase , desc='Evaluating ... '):
a__: str = self.data_collator(lowercase)
a__: List[str] = self.val_step_fn(lowercase , **lowercase)
running_loss += jax_utils.unreplicate(metrics['loss'])
i += 1
return running_loss / i
def lowerCamelCase_ ( self , lowercase , lowercase) -> Any:
'''simple docstring'''
a__: List[Any] = jax_utils.unreplicate(lowercase)
print(f'SAVING CHECKPOINT IN {save_dir}' , end=' ... ')
self.model_save_fn(lowercase , params=state.params)
with open(os.path.join(lowercase , 'opt_state.msgpack') , 'wb') as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(lowercase , 'args.joblib'))
joblib.dump(self.data_collator , os.path.join(lowercase , 'data_collator.joblib'))
with open(os.path.join(lowercase , 'training_state.json') , 'w') as f:
json.dump({'step': state.step.item()} , lowercase)
print('DONE')
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=' ... ' )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'flax_model.msgpack' ) , 'rb' ) as f:
a__: int = from_bytes(state.params , f.read() )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'opt_state.msgpack' ) , 'rb' ) as f:
a__: Optional[Any] = from_bytes(state.opt_state , f.read() )
a__: Optional[Any] = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , 'args.joblib' ) )
a__: int = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , 'data_collator.joblib' ) )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'training_state.json' ) , 'r' ) as f:
a__: Any = json.load(_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
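    # Learning-rate schedule: linear warmup to the peak lr, then linear decay toward ~1e-7, with the two pieces joined at the warmup boundary.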
a__: str = num_train_steps - warmup_steps
a__: str = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=_SCREAMING_SNAKE_CASE , transition_steps=_SCREAMING_SNAKE_CASE )
a__: List[Any] = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=1e-7 , transition_steps=_SCREAMING_SNAKE_CASE )
a__: int = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
def weight_decay_mask(_SCREAMING_SNAKE_CASE ):
a__: List[Any] = traverse_util.flatten_dict(_SCREAMING_SNAKE_CASE )
a__: List[str] = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(_SCREAMING_SNAKE_CASE )
a__: List[str] = scheduler_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Any = optax.adamw(learning_rate=_SCREAMING_SNAKE_CASE , weight_decay=_SCREAMING_SNAKE_CASE , mask=_SCREAMING_SNAKE_CASE )
return tx, lr
| 290 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list:
if len(_SCREAMING_SNAKE_CASE ) != 2 or len(a[0] ) != 2 or len(_SCREAMING_SNAKE_CASE ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
a__: List[Any] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_SCREAMING_SNAKE_CASE ) )
]
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_SCREAMING_SNAKE_CASE ) )
]
def __a ( _SCREAMING_SNAKE_CASE ) ->tuple[list, list, list, list]:
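    # Split an even-dimensioned square matrix into four quadrants: (top_left, top_right, bot_left, bot_right).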
if len(_SCREAMING_SNAKE_CASE ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
a__: Tuple = len(_SCREAMING_SNAKE_CASE )
a__: int = matrix_length // 2
a__: int = [[a[i][j] for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE )]
a__: int = [
[a[i][j] for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
a__: Optional[Any] = [[a[i][j] for j in range(_SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE )]
a__: Dict = [[a[i][j] for j in range(_SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return top_left, top_right, bot_left, bot_right
def __a ( _SCREAMING_SNAKE_CASE ) ->tuple[int, int]:
return len(_SCREAMING_SNAKE_CASE ), len(matrix[0] )
def __a ( _SCREAMING_SNAKE_CASE ) ->None:
    print('\n'.join(str(line ) for line in matrix ) )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list:
if matrix_dimensions(_SCREAMING_SNAKE_CASE ) == (2, 2):
return default_matrix_multiplication(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__ , a__ , a__ , a__: str = split_matrix(_SCREAMING_SNAKE_CASE )
a__ , a__ , a__ , a__: Dict = split_matrix(_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = actual_strassen(_SCREAMING_SNAKE_CASE , matrix_subtraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
a__: str = actual_strassen(matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
a__: int = actual_strassen(matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
a__: Optional[int] = actual_strassen(_SCREAMING_SNAKE_CASE , matrix_subtraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
a__: List[Any] = actual_strassen(matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
a__: List[str] = actual_strassen(matrix_subtraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
a__: Optional[Any] = actual_strassen(matrix_subtraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
a__: Optional[Any] = matrix_addition(matrix_subtraction(matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
a__: Optional[int] = matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: str = matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Tuple = matrix_subtraction(matrix_subtraction(matrix_addition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# construct the new matrix from our 4 quadrants
a__: Optional[Any] = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list:
if matrix_dimensions(_SCREAMING_SNAKE_CASE )[1] != matrix_dimensions(_SCREAMING_SNAKE_CASE )[0]:
a__: int = (
'Unable to multiply these matrices, please check the dimensions.\n'
F'Matrix A: {matrixa}\n'
F'Matrix B: {matrixa}'
)
raise Exception(_SCREAMING_SNAKE_CASE )
a__: List[Any] = matrix_dimensions(_SCREAMING_SNAKE_CASE )
a__: Union[str, Any] = matrix_dimensions(_SCREAMING_SNAKE_CASE )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
a__: Optional[int] = max(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = int(math.pow(2 , math.ceil(math.loga(_SCREAMING_SNAKE_CASE ) ) ) )
a__: int = matrixa
a__: Any = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , _SCREAMING_SNAKE_CASE ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _SCREAMING_SNAKE_CASE ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _SCREAMING_SNAKE_CASE ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
a__: Any = actual_strassen(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Removing the additional zeros
for i in range(0 , _SCREAMING_SNAKE_CASE ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _SCREAMING_SNAKE_CASE ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
lowercase__ = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
lowercase__ = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 290 | """simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowercase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return image
elif isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
a__: Optional[int] = [image]
a__: str = [trans(img.convert('RGB' ) ) for img in image]
a__: Any = torch.stack(_SCREAMING_SNAKE_CASE )
return image
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
a__: Dict = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=lowercase , scheduler=lowercase)
def lowerCamelCase_ ( self , lowercase) -> int:
'''simple docstring'''
if strength < 0 or strength > 1:
            raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}')
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: int = min(int(num_inference_steps * strength) , lowercase)
a__: Any = max(num_inference_steps - init_timestep , 0)
a__: Union[str, Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None) -> List[Any]:
'''simple docstring'''
if not isinstance(lowercase , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase)}')
a__: Tuple = image.to(device=lowercase , dtype=lowercase)
if isinstance(lowercase , lowercase) and len(lowercase) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(lowercase)}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
a__: List[str] = init_latents.shape
a__: List[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase)
# get latents
print('add noise to latents at timestep' , lowercase)
a__: int = self.scheduler.add_noise(lowercase , lowercase , lowercase)
a__: Dict = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase = None , lowercase = 0.8 , lowercase = 1 , lowercase = None , lowercase = 0.0 , lowercase = 50 , lowercase = None , lowercase = "pil" , lowercase = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowercase)
# 2. Preprocess image
a__: Tuple = preprocess(lowercase)
# 3. set timesteps
self.scheduler.set_timesteps(lowercase , device=self.device)
a__ , a__: Union[str, Any] = self.get_timesteps(lowercase , lowercase , self.device)
a__: Optional[int] = timesteps[:1].repeat(lowercase)
# 4. Prepare latent variables
a__: Union[str, Any] = self.prepare_latents(lowercase , lowercase , lowercase , self.unet.dtype , self.device , lowercase)
a__: Optional[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase):
# 1. predict noise model_output
a__: Dict = self.unet(lowercase , lowercase).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
a__: Optional[Any] = self.scheduler.step(
lowercase , lowercase , lowercase , eta=lowercase , use_clipped_model_output=lowercase , generator=lowercase , ).prev_sample
a__: Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1)
a__: Optional[int] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
a__: Dict = self.numpy_to_pil(lowercase)
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase)
| 290 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=4_00 , lowercase=True , lowercase=None , lowercase=True , lowercase=False , lowercase=True , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , ) -> Tuple:
'''simple docstring'''
a__: Any = parent
a__: List[Any] = batch_size
a__: Dict = num_channels
a__: Optional[Any] = image_size
a__: Optional[int] = min_resolution
a__: Union[str, Any] = max_resolution
a__: Any = do_resize
a__: Tuple = size if size is not None else {'height': 18, 'width': 20}
a__: int = do_thumbnail
a__: Union[str, Any] = do_align_axis
a__: Optional[Any] = do_pad
a__: Dict = do_normalize
a__: str = image_mean
a__: Union[str, Any] = image_std
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = DonutImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: str = DonutImageProcessingTester(self)
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: str = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase , 'do_resize'))
self.assertTrue(hasattr(lowercase , 'size'))
self.assertTrue(hasattr(lowercase , 'do_thumbnail'))
self.assertTrue(hasattr(lowercase , 'do_align_long_axis'))
self.assertTrue(hasattr(lowercase , 'do_pad'))
self.assertTrue(hasattr(lowercase , 'do_normalize'))
self.assertTrue(hasattr(lowercase , 'image_mean'))
self.assertTrue(hasattr(lowercase , 'image_std'))
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: List[str] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 18, 'width': 20})
a__: Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
# Previous config had dimensions in (width, height) order
a__: Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84))
self.assertEqual(image_processor.size , {'height': 84, 'width': 42})
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
pass
@is_flaky()
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Dict = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a__: Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase)
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image)
# Test not batched input
a__: int = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a__: Dict = image_processing(lowercase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: str = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a__: str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase)
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray)
# Test not batched input
a__: str = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a__: Tuple = image_processing(lowercase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a__: str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase)
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor)
# Test not batched input
a__: Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a__: List[Any] = image_processing(lowercase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 290 | """simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Optional[Any] = tempfile.mkdtemp()
a__: Optional[int] = SamImageProcessor()
a__: Tuple = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> List[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__: Optional[int] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__: List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Union[str, Any] = self.get_image_processor()
a__: List[Any] = SamProcessor(image_processor=lowercase)
a__: Optional[int] = self.prepare_image_inputs()
a__: Optional[Any] = image_processor(lowercase , return_tensors='np')
a__: Tuple = processor(images=lowercase , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_torch
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: int = self.get_image_processor()
a__: List[str] = SamProcessor(image_processor=lowercase)
a__: Optional[Any] = [torch.ones((1, 3, 5, 5))]
a__: Union[str, Any] = [[17_64, 26_46]]
a__: Optional[Any] = [[6_83, 10_24]]
a__: int = processor.post_process_masks(lowercase , lowercase , lowercase)
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Optional[int] = processor.post_process_masks(
lowercase , torch.tensor(lowercase) , torch.tensor(lowercase))
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
# should also work with np
a__: Dict = [np.ones((1, 3, 5, 5))]
a__: Tuple = processor.post_process_masks(lowercase , np.array(lowercase) , np.array(lowercase))
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Tuple = [[1, 0], [0, 1]]
with self.assertRaises(lowercase):
a__: List[Any] = processor.post_process_masks(lowercase , np.array(lowercase) , np.array(lowercase))
@require_vision
@require_tf
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = tempfile.mkdtemp()
a__: List[Any] = SamImageProcessor()
a__: Optional[int] = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> int:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Optional[int] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__: Dict = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__: Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[Any] = self.get_image_processor()
a__: str = SamProcessor(image_processor=lowercase)
a__: int = self.prepare_image_inputs()
a__: int = image_processor(lowercase , return_tensors='np')
a__: Dict = processor(images=lowercase , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_tf
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Tuple = self.get_image_processor()
a__: Any = SamProcessor(image_processor=lowercase)
a__: str = [tf.ones((1, 3, 5, 5))]
a__: List[Any] = [[17_64, 26_46]]
a__: List[Any] = [[6_83, 10_24]]
a__: List[Any] = processor.post_process_masks(lowercase , lowercase , lowercase , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Tuple = processor.post_process_masks(
lowercase , tf.convert_to_tensor(lowercase) , tf.convert_to_tensor(lowercase) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
# should also work with np
a__: Optional[Any] = [np.ones((1, 3, 5, 5))]
a__: int = processor.post_process_masks(
lowercase , np.array(lowercase) , np.array(lowercase) , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: List[str] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError):
a__: Any = processor.post_process_masks(
lowercase , np.array(lowercase) , np.array(lowercase) , return_tensors='tf')
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: str = tempfile.mkdtemp()
a__: int = SamImageProcessor()
a__: Union[str, Any] = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> Optional[int]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Any = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[int] = self.get_image_processor()
a__: int = SamProcessor(image_processor=lowercase)
a__: int = np.random.randint(0 , 2 , size=(1, 3, 5, 5)).astype(np.floataa)
a__: Dict = [tf.convert_to_tensor(lowercase)]
a__: Union[str, Any] = [torch.tensor(lowercase)]
a__: List[Any] = [[17_64, 26_46]]
a__: Optional[Any] = [[6_83, 10_24]]
a__: Tuple = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='tf')
a__: str = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='pt')
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Tuple = self.get_image_processor()
a__: Dict = SamProcessor(image_processor=lowercase)
a__: Any = self.prepare_image_inputs()
a__: List[Any] = image_processor(lowercase , return_tensors='pt')['pixel_values'].numpy()
a__: Tuple = processor(images=lowercase , return_tensors='pt')['pixel_values'].numpy()
a__: Any = image_processor(lowercase , return_tensors='tf')['pixel_values'].numpy()
a__: Any = processor(images=lowercase , return_tensors='tf')['pixel_values'].numpy()
self.assertTrue(np.allclose(lowercase , lowercase))
self.assertTrue(np.allclose(lowercase , lowercase))
self.assertTrue(np.allclose(lowercase , lowercase))
| 290 | 1 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: int = inspect.getfile(accelerate.test_utils)
a__: Any = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
a__: Dict = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])
@require_tpu
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[int] = f'\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '.split()
a__: Optional[int] = [sys.executable] + distributed_args
execute_subprocess_async(lowercase , env=os.environ.copy())
| 290 | """simple docstring"""
from math import pow, sqrt
def __a ( *_SCREAMING_SNAKE_CASE ) ->bool:
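    # Sanity check shared by the Graham's law helpers below: every supplied value must be strictly positive.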
a__: Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) > 0 and all(value > 0.0 for value in values )
return result
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        else ValueError('Input Error: Molar mass values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
| 290 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase , lowercase) -> Tuple:
'''simple docstring'''
a__: Optional[int] = dataset
a__: Union[str, Any] = process
a__: Any = params
def __len__( self) -> List[str]:
'''simple docstring'''
return len(self.dataset)
def __getitem__( self , lowercase) -> Tuple:
'''simple docstring'''
a__: int = self.dataset[i]
a__: Any = self.process(lowercase , **self.params)
return processed
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase , lowercase , lowercase=None) -> Dict:
'''simple docstring'''
a__: List[Any] = loader
a__: Any = infer
a__: List[str] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
a__: Optional[Any] = None
a__: List[str] = loader_batch_size
# Internal bookkeeping
a__: int = None
a__: List[str] = None
def __len__( self) -> Union[str, Any]:
'''simple docstring'''
return len(self.loader)
def __iter__( self) -> int:
'''simple docstring'''
a__: Optional[int] = iter(self.loader)
return self
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor):
# Batch data is simple tensor, just fetch the slice
a__: List[str] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
a__: List[str] = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowercase , lowercase):
# Convert ModelOutput to tuple first
a__: Optional[Any] = element.to_tuple()
if isinstance(element[0] , torch.Tensor):
a__: Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
a__: List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowercase , lowercase):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor):
a__: Any = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
a__: Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if element is None:
# This can happen for optional data that get passed around
a__: List[Any] = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
a__: List[str] = element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index] , np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
a__: Optional[int] = np.expand_dims(element[self._loader_batch_index] , 0)
else:
# This is typically a list, so no need to `unsqueeze`.
a__: int = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
a__: List[Any] = self._loader_batch_data.__class__(lowercase)
self._loader_batch_index += 1
return result
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
a__: Union[str, Any] = next(self.iterator)
a__: Any = self.infer(lowercase , **self.params)
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowercase , torch.Tensor):
a__: int = processed
else:
a__: List[str] = list(processed.keys())[0]
a__: str = processed[key]
if isinstance(lowercase , lowercase):
a__: Tuple = len(lowercase)
else:
a__: int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
a__: Any = observed_batch_size
# Setting internal index to unwrap the batch
a__: Dict = processed
a__: int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase , lowercase , lowercase=None) -> List[Any]:
'''simple docstring'''
super().__init__(lowercase , lowercase , lowercase)
def __iter__( self) -> Any:
'''simple docstring'''
a__: Dict = iter(self.loader)
a__: List[str] = None
return self
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
if self.subiterator is None:
a__: List[str] = self.infer(next(self.iterator) , **self.params)
try:
# Try to return next item
a__: Union[str, Any] = next(self.subiterator)
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
a__: Union[str, Any] = self.infer(next(self.iterator) , **self.params)
a__: List[Any] = next(self.subiterator)
return processed
class __snake_case ( __lowerCAmelCase ):
def __iter__( self) -> Any:
'''simple docstring'''
a__: Optional[Any] = iter(self.loader)
return self
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Any = False
a__: Optional[Any] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
a__: int = self.loader_batch_item()
a__: Tuple = item.pop('is_last')
accumulator.append(lowercase)
if is_last:
return accumulator
while not is_last:
a__: Any = self.infer(next(self.iterator) , **self.params)
if self.loader_batch_size is not None:
if isinstance(lowercase , torch.Tensor):
a__: Optional[Any] = processed
else:
a__: List[Any] = list(processed.keys())[0]
a__: Union[str, Any] = processed[key]
if isinstance(lowercase , lowercase):
a__: Dict = len(lowercase)
else:
a__: Optional[Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
a__: str = observed_batch_size
a__: List[str] = processed
a__: Optional[Any] = 0
while self._loader_batch_index < self.loader_batch_size:
a__: int = self.loader_batch_item()
a__: Tuple = item.pop('is_last')
accumulator.append(lowercase)
if is_last:
return accumulator
else:
a__: List[str] = processed
a__: List[Any] = item.pop('is_last')
accumulator.append(lowercase)
return accumulator
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase) -> Any:
'''simple docstring'''
a__: Tuple = dataset
a__: List[Any] = key
def __len__( self) -> str:
'''simple docstring'''
return len(self.dataset)
def __getitem__( self , lowercase) -> Optional[Any]:
'''simple docstring'''
return self.dataset[i][self.key]
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__: int = dataset
a__: str = keya
a__: Any = keya
def __len__( self) -> List[str]:
'''simple docstring'''
return len(self.dataset)
def __getitem__( self , lowercase) -> Optional[Any]:
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 290 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class __snake_case ( __lowerCAmelCase ):
a__ = """roberta-prelayernorm"""
def __init__( self , lowercase=5_02_65 , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=None , **lowercase , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
a__: Union[str, Any] = vocab_size
a__: str = hidden_size
a__: Tuple = num_hidden_layers
a__: List[str] = num_attention_heads
a__: Dict = hidden_act
a__: int = intermediate_size
a__: Tuple = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: Tuple = max_position_embeddings
a__: Tuple = type_vocab_size
a__: Optional[Any] = initializer_range
a__: Tuple = layer_norm_eps
a__: Optional[int] = position_embedding_type
a__: Any = use_cache
a__: Dict = classifier_dropout
class __snake_case ( __lowerCAmelCase ):
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
a__: str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__: Union[str, Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 290 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __snake_case ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=4 , ) -> Tuple:
'''simple docstring'''
a__: Any = parent
a__: int = batch_size
a__: str = seq_length
a__: Union[str, Any] = is_training
a__: str = use_attention_mask
a__: Tuple = use_token_type_ids
a__: List[Any] = use_labels
a__: str = vocab_size
a__: int = hidden_size
a__: Tuple = num_hidden_layers
a__: Any = num_attention_heads
a__: Optional[int] = intermediate_size
a__: List[str] = hidden_act
a__: List[str] = hidden_dropout_prob
a__: Optional[Any] = attention_probs_dropout_prob
a__: List[Any] = max_position_embeddings
a__: Dict = type_vocab_size
a__: Optional[Any] = type_sequence_label_size
a__: List[str] = initializer_range
a__: Tuple = num_choices
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__: Union[str, Any] = None
if self.use_attention_mask:
a__: Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
a__: Dict = None
if self.use_token_type_ids:
a__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a__: Dict = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[Any] = self.prepare_config_and_inputs()
a__ , a__ , a__ , a__: Tuple = config_and_inputs
a__: Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Optional[int] = self.prepare_config_and_inputs()
a__ , a__ , a__ , a__: Any = config_and_inputs
a__: Optional[int] = True
a__: List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a__: Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = True
a__ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = FlaxRobertaPreLayerNormModelTester(self)
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
a__: List[str] = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowercase)
a__: List[str] = model(np.ones((1, 1)))
self.assertIsNotNone(lowercase)
@require_flax
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: List[str] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowercase)
a__: Union[str, Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa)
a__: Optional[int] = model(lowercase)[0]
a__: Dict = [1, 11, 5_02_65]
self.assertEqual(list(output.shape) , lowercase)
# compare the actual values for a slice.
a__: Optional[int] = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa)
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=1e-4))
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Dict = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowercase)
a__: Dict = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa)
a__: Union[str, Any] = model(lowercase)[0]
# compare the actual values for a slice.
a__: str = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa)
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=1e-4))
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __snake_case ( __lowerCAmelCase ):
a__ = """audio-spectrogram-transformer"""
def __init__( self , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-12 , lowercase=16 , lowercase=True , lowercase=10 , lowercase=10 , lowercase=10_24 , lowercase=1_28 , **lowercase , ) -> str:
'''simple docstring'''
super().__init__(**lowercase)
a__: Any = hidden_size
a__: int = num_hidden_layers
a__: Union[str, Any] = num_attention_heads
a__: Any = intermediate_size
a__: Union[str, Any] = hidden_act
a__: int = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: str = initializer_range
a__: Tuple = layer_norm_eps
a__: Any = patch_size
a__: int = qkv_bias
a__: Optional[Any] = frequency_stride
a__: int = time_stride
a__: List[str] = max_length
a__: Tuple = num_mel_bins
| 290 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase__ = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 290 | """simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir('fixtures/test_sentencepiece.model')
lowercase__ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
lowercase__ = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = CamembertTokenizer
a__ = CamembertTokenizerFast
a__ = True
a__ = True
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a__: Tuple = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = '<pad>'
a__: List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: str = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>NOTUSED')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , '<mask>')
self.assertEqual(len(lowercase) , 10_04)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_05)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Optional[Any] = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
a__: List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
a__: Dict = 'I was born in 92000, and this is falsé.'
a__: Optional[int] = tokenizer.encode(lowercase)
a__: Any = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Optional[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
# <unk> tokens are not the same for `rust` as for `slow`,
# because spm gives back the raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
a__: Tuple = tokenizer.convert_ids_to_tokens(lowercase)
a__: Tuple = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__: Dict = self.get_tokenizer()
a__: str = self.get_rust_tokenizer()
a__: int = 'I was born in 92000, and this is falsé.'
a__: Optional[Any] = tokenizer.tokenize(lowercase)
a__: List[Any] = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: str = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Tuple = self.get_rust_tokenizer()
a__: Union[str, Any] = tokenizer.encode(lowercase)
a__: List[Any] = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Union[str, Any] = {'input_ids': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
a__: int = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=lowercase , )
| 290 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = XLNetTokenizer
a__ = XLNetTokenizerFast
a__ = True
a__ = True
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a__: Dict = XLNetTokenizer(lowercase , keep_accents=lowercase)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Any = '<s>'
a__: Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<unk>')
self.assertEqual(vocab_keys[1] , '<s>')
self.assertEqual(vocab_keys[-1] , '<eod>')
self.assertEqual(len(lowercase) , 10_06)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_00)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[Any] = XLNetTokenizer(lowercase , keep_accents=lowercase)
a__: List[Any] = tokenizer.tokenize('This is a test')
self.assertListEqual(lowercase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase) , [2_85, 46, 10, 1_70, 3_82])
a__: Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
a__: str = tokenizer.convert_tokens_to_ids(lowercase)
self.assertListEqual(lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4])
a__: List[Any] = tokenizer.convert_ids_to_tokens(lowercase)
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[Any] = XLNetTokenizer(lowercase , do_lower_case=lowercase)
a__: Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['▁he', 'll', 'o'])
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Tuple = XLNetTokenizer(lowercase , do_lower_case=lowercase)
a__: Optional[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
@slow
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Tuple = XLNetTokenizer.from_pretrained('xlnet-base-cased')
a__: Union[str, Any] = tokenizer.encode('sequence builders' , add_special_tokens=lowercase)
a__: List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase)
a__: Tuple = tokenizer.build_inputs_with_special_tokens(lowercase)
a__: str = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: List[str] = {'input_ids': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE = 1000000 ) ->int:
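# With x = a + d, y = a, z = a - d in arithmetic progression, x^2 - y^2 - z^2 = a * (4d - a) = n,
# so 4d = a + n / a must be a positive multiple of 4; z > 0 requires a > d and n > 0 requires a < 4d.
# Below, `first_term` plays the role of a and `common_difference` the role of d.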
a__: int = limit + 1
a__: Optional[int] = [0] * limit
for first_term in range(1 , _SCREAMING_SNAKE_CASE ):
for n in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: List[Any] = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z > 0 needs a > d, and n > 0 needs a < 4d
a__: Any = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"{solution() = }")
| 290 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __snake_case ( __lowerCAmelCase ):
a__ = """altclip_text_model"""
def __init__( self , lowercase=25_00_02 , lowercase=10_24 , lowercase=24 , lowercase=16 , lowercase=40_96 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_14 , lowercase=1 , lowercase=0.02 , lowercase=0.02 , lowercase=1e-05 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=7_68 , **lowercase , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
a__: Tuple = vocab_size
a__: str = hidden_size
a__: Optional[Any] = num_hidden_layers
a__: Tuple = num_attention_heads
a__: Union[str, Any] = hidden_act
a__: List[str] = intermediate_size
a__: int = hidden_dropout_prob
a__: List[str] = attention_probs_dropout_prob
a__: List[Any] = max_position_embeddings
a__: str = type_vocab_size
a__: Dict = initializer_range
a__: List[str] = initializer_factor
a__: Optional[int] = layer_norm_eps
a__: int = position_embedding_type
a__: Dict = use_cache
a__: Optional[Any] = project_dim
class __snake_case ( __lowerCAmelCase ):
a__ = """altclip_vision_model"""
def __init__( self , lowercase=7_68 , lowercase=30_72 , lowercase=5_12 , lowercase=12 , lowercase=12 , lowercase=3 , lowercase=2_24 , lowercase=32 , lowercase="quick_gelu" , lowercase=1e-5 , lowercase=0.0 , lowercase=0.02 , lowercase=1.0 , **lowercase , ) -> List[str]:
'''simple docstring'''
super().__init__(**lowercase)
a__: Optional[Any] = hidden_size
a__: Optional[int] = intermediate_size
a__: Optional[Any] = projection_dim
a__: str = num_hidden_layers
a__: Optional[int] = num_attention_heads
a__: List[str] = num_channels
a__: Optional[int] = patch_size
a__: Any = image_size
a__: List[Any] = initializer_range
a__: str = initializer_factor
a__: Optional[int] = attention_dropout
a__: Dict = layer_norm_eps
a__: Union[str, Any] = hidden_act
@classmethod
def lowerCamelCase_ ( cls , lowercase , **lowercase) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowercase)
a__ , a__: int = cls.get_config_dict(lowercase , **lowercase)
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('model_type') == "altclip":
a__: str = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(lowercase , **lowercase)
class __snake_case ( __lowerCAmelCase ):
a__ = """altclip"""
a__ = True
def __init__( self , lowercase=None , lowercase=None , lowercase=7_68 , lowercase=2.6592 , **lowercase) -> str:
'''simple docstring'''
a__: List[Any] = kwargs.pop('text_config_dict' , lowercase)
a__: Tuple = kwargs.pop('vision_config_dict' , lowercase)
super().__init__(**lowercase)
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
a__: str = {}
# This is the complete result when using `text_config_dict`.
a__: int = AltCLIPTextConfig(**lowercase).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
a__: Union[str, Any] = (
f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
f'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
a__: str = (
f'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
f'value `text_config["{key}"]` will be overriden.'
)
logger.warning(lowercase)
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict)
if vision_config_dict is not None:
if vision_config is None:
a__: Optional[Any] = {}
# This is the complete result when using `vision_config_dict`.
a__: List[str] = AltCLIPVisionConfig(**lowercase).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
a__: str = {
str(lowercase): value for key, value in _vision_config_dict['id2label'].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
a__: List[str] = (
f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
f'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
a__: Optional[Any] = (
f'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
f'The value `vision_config["{key}"]` will be overriden.'
)
logger.warning(lowercase)
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict)
if text_config is None:
a__: Optional[Any] = {}
logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.')
if vision_config is None:
a__: Union[str, Any] = {}
logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.')
a__: int = AltCLIPTextConfig(**lowercase)
a__: Optional[int] = AltCLIPVisionConfig(**lowercase)
a__: Optional[Any] = projection_dim
a__: List[str] = logit_scale_init_value
a__: int = 1.0
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase , **lowercase) -> Any:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = copy.deepcopy(self.__dict__)
a__: Tuple = self.text_config.to_dict()
a__: Optional[int] = self.vision_config.to_dict()
a__: str = self.__class__.model_type
return output
| 290 | """simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowercase__ = TypeVar('T')
lowercase__ = Union[List[T], Tuple[T, ...]]
lowercase__ = Union[T, List[T], Dict[str, T]]
lowercase__ = Union[str, bytes, os.PathLike]
| 290 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
lowercase__ = list[tuple[int, int]]
lowercase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free path cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __snake_case :
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase) -> Tuple:
'''simple docstring'''
a__: int = pos_x
a__: Union[str, Any] = pos_y
a__: Any = (pos_y, pos_x)
a__: List[str] = goal_x
a__: Optional[Any] = goal_y
a__: Dict = parent
class __snake_case :
def __init__( self , lowercase , lowercase) -> Tuple:
'''simple docstring'''
a__: str = Node(start[1] , start[0] , goal[1] , goal[0] , lowercase)
a__: Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowercase)
a__: int = [self.start]
a__: List[str] = False
def lowerCamelCase_ ( self) -> Path | None:
'''simple docstring'''
while self.node_queue:
a__: int = self.node_queue.pop(0)
if current_node.pos == self.target.pos:
a__: Optional[Any] = True
return self.retrace_path(lowercase)
a__: Dict = self.get_successors(lowercase)
for node in successors:
self.node_queue.append(lowercase)
if not self.reached:
return [self.start.pos]
return None
def lowerCamelCase_ ( self , lowercase) -> list[Node]:
'''simple docstring'''
a__: Any = []
for action in delta:
a__: List[Any] = parent.pos_x + action[1]
a__: List[str] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(lowercase) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowercase , lowercase , self.target.pos_y , self.target.pos_x , lowercase))
return successors
def lowerCamelCase_ ( self , lowercase) -> Path:
'''simple docstring'''
a__: Tuple = node
a__: List[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
a__: str = current_node.parent
path.reverse()
return path
class __snake_case :
def __init__( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[Any] = BreadthFirstSearch(lowercase , lowercase)
a__: Any = BreadthFirstSearch(lowercase , lowercase)
a__: Dict = False
def lowerCamelCase_ ( self) -> Path | None:
'''simple docstring'''
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
a__: Optional[Any] = self.fwd_bfs.node_queue.pop(0)
a__: Optional[Any] = self.bwd_bfs.node_queue.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
a__: List[Any] = True
return self.retrace_bidirectional_path(
lowercase , lowercase)
a__: Optional[int] = current_bwd_node
a__: str = current_fwd_node
a__: Optional[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(lowercase),
self.bwd_bfs: self.bwd_bfs.get_successors(lowercase),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowercase)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCamelCase_ ( self , lowercase , lowercase) -> Path:
'''simple docstring'''
a__: Optional[Any] = self.fwd_bfs.retrace_path(lowercase)
a__: List[str] = self.bwd_bfs.retrace_path(lowercase)
bwd_path.pop()
bwd_path.reverse()
a__: Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowercase__ = (0, 0)
lowercase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowercase__ = time.time()
lowercase__ = BreadthFirstSearch(init, goal)
lowercase__ = bfs.search()
lowercase__ = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
lowercase__ = time.time()
lowercase__ = BidirectionalBreadthFirstSearch(init, goal)
lowercase__ = bd_bfs.search()
lowercase__ = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 290 | """simple docstring"""
from math import pi, sqrt, tan
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
a__: List[Any] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_SCREAMING_SNAKE_CASE , 2 ) * torus_radius * tube_radius
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
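# Heron's formula: area = sqrt(s * (s - sidea) * (s - sideb) * (s - sidec)), where s is the semi-perimeter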
a__: int = (sidea + sidea + sidea) / 2
a__: Tuple = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 290 | 1 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
lowercase__ = logging.get_logger(__name__)
def __a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->Union[str, Any]:
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
@dataclass
class __snake_case :
a__ = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
a__ = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
a__ = list_field(
default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
a__ = field(
default=__lowerCAmelCase , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
a__ = field(
default=__lowerCAmelCase , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
a__ = field(
default=__lowerCAmelCase , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
a__ = field(default=__lowerCAmelCase , metadata={"""help""": """Use FP16 to accelerate inference."""} )
a__ = field(default=__lowerCAmelCase , metadata={"""help""": """Benchmark training of model"""} )
a__ = field(default=__lowerCAmelCase , metadata={"""help""": """Verbose memory tracing"""} )
a__ = field(
default=__lowerCAmelCase , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
a__ = field(
default=__lowerCAmelCase , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
a__ = field(default=__lowerCAmelCase , metadata={"""help""": """Trace memory line by line"""} )
a__ = field(default=__lowerCAmelCase , metadata={"""help""": """Save result to a CSV file"""} )
a__ = field(default=__lowerCAmelCase , metadata={"""help""": """Save all print statements in a log file"""} )
a__ = field(default=__lowerCAmelCase , metadata={"""help""": """Whether to print environment information"""} )
a__ = field(
default=__lowerCAmelCase , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
a__ = field(
default=F"""inference_time_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
a__ = field(
default=F"""inference_memory_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
a__ = field(
default=F"""train_time_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
a__ = field(
default=F"""train_memory_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
a__ = field(
default=F"""env_info_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving environment information."""} , )
a__ = field(
default=F"""log_{round(time() )}.csv""" , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
a__ = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
a__ = field(
default=__lowerCAmelCase , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
warnings.warn(
f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
' are deprecated in general and it is advised to use external Benchmarking libraries '
' to benchmark Transformer models.' , lowercase , )
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return json.dumps(dataclasses.asdict(self) , indent=2)
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
if len(self.models) <= 0:
raise ValueError(
'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
' bert-base-cased` or `args.models = [\'bert-base-cased\'].')
return self.models
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.')
return False
else:
return True
| 290 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase__ = random.Random()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
if rng is None:
a__: Any = global_rng
a__: int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __snake_case ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=4_00 , lowercase=20_00 , lowercase=1 , lowercase=0.0 , lowercase=1_60_00 , lowercase=True , lowercase=True , ) -> Union[str, Any]:
'''simple docstring'''
a__: Tuple = parent
a__: Optional[int] = batch_size
a__: Optional[Any] = min_seq_length
a__: Optional[int] = max_seq_length
a__: Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__: Dict = feature_size
a__: Any = padding_value
a__: Optional[Any] = sampling_rate
a__: Optional[Any] = return_attention_mask
a__: str = do_normalize
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase_ ( self , lowercase=False , lowercase=False) -> Tuple:
'''simple docstring'''
def _flatten(lowercase):
return list(itertools.chain(*lowercase))
if equal_length:
a__: Dict = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
a__: List[Any] = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
a__: str = [np.asarray(lowercase) for x in speech_inputs]
return speech_inputs
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = WavaVecaFeatureExtractor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[int] = WavaVecaFeatureExtractionTester(self)
def lowerCamelCase_ ( self , lowercase) -> List[Any]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowercase , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0) - 1) < 1e-3))
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
a__: Optional[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: List[str] = [np.asarray(lowercase) for speech_input in speech_inputs]
# Test not batched input
a__: Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='np').input_values
a__: Dict = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test batched
a__: Dict = feat_extract(lowercase , return_tensors='np').input_values
a__: int = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test 2-D numpy arrays are batched.
a__: int = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
a__: Union[str, Any] = np.asarray(lowercase)
a__: int = feat_extract(lowercase , return_tensors='np').input_values
a__: Any = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: List[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Optional[int] = ['longest', 'max_length', 'do_not_pad']
a__: List[Any] = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: Dict = feat_extract(lowercase , padding=lowercase , max_length=lowercase , return_tensors='np')
a__: Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self.assertTrue(input_values[0][8_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self.assertTrue(input_values[0][10_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Optional[int] = range(8_00 , 14_00 , 2_00)
a__: List[str] = [floats_list((1, x))[0] for x in lengths]
a__: Tuple = ['longest', 'max_length', 'do_not_pad']
a__: Dict = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: int = feat_extract(lowercase , max_length=lowercase , padding=lowercase)
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Any = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Dict = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='max_length' , return_tensors='np')
a__: int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: int = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: str = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='longest' , return_tensors='np')
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00))
a__: Dict = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Tuple = feat_extract(
lowercase , truncation=lowercase , max_length=20_00 , padding='longest' , return_tensors='np')
a__: str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00))
@require_torch
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
import torch
a__: Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Tuple = np.random.rand(1_00).astype(np.floataa)
a__: Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__: Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
a__: Optional[Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
@slow
@require_torch
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
a__: str = WavaVecaConfig.from_pretrained(lowercase)
a__: str = WavaVecaFeatureExtractor.from_pretrained(lowercase)
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer')
| 290 | 1 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
class __snake_case ( __lowerCAmelCase ):
a__ = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase="</s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase=1_25 , lowercase=None , **lowercase , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
a__: Optional[Any] = [f'<extra_id_{i}>' for i in range(lowercase)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
a__: Optional[int] = len(set(filter(lambda lowercase: bool('extra_id' in str(lowercase)) , lowercase)))
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens')
a__: Dict = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else pad_token
a__: Optional[int] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else eos_token
a__: Dict = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else unk_token
super().__init__(
eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , extra_ids=lowercase , additional_special_tokens=lowercase , **lowercase , )
a__: Union[str, Any] = extra_ids
a__: str = 2**8 # utf is 8 bits
# define special tokens dict
        a__: Dict[str, int] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
a__: Dict = len(self.special_tokens_encoder)
a__: List[Any] = len(lowercase)
for i, token in enumerate(lowercase):
a__: Union[str, Any] = self.vocab_size + i - n
        a__: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def lowerCamelCase_ ( self , lowercase , lowercase = None , lowercase = False) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase)) + [1]
return ([0] * len(lowercase)) + [1] + ([0] * len(lowercase)) + [1]
def lowerCamelCase_ ( self , lowercase) -> List[int]:
'''simple docstring'''
if len(lowercase) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.')
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__: Any = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__: Dict = self._add_eos_if_not_present(lowercase)
if token_ids_a is None:
return token_ids_a
else:
a__: Optional[Any] = self._add_eos_if_not_present(lowercase)
return token_ids_a + token_ids_a
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
        a__: int = [chr(i) for i in text.encode('utf-8')]
return tokens
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
if token in self.special_tokens_encoder:
a__: List[str] = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
a__: Union[str, Any] = self.added_tokens_encoder[token]
elif len(lowercase) != 1:
a__: List[Any] = self.unk_token_id
else:
a__: int = ord(lowercase) + self._num_special_tokens
return token_id
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
if index in self.special_tokens_decoder:
a__: Tuple = self.special_tokens_decoder[index]
else:
a__: Tuple = chr(index - self._num_special_tokens)
return token
def lowerCamelCase_ ( self , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[Any] = B''
for token in tokens:
if token in self.special_tokens_decoder:
a__: Optional[Any] = self.special_tokens_decoder[token].encode('utf-8')
elif token in self.added_tokens_decoder:
                a__: List[Any] = self.added_tokens_decoder[token].encode('utf-8')
elif token in self.special_tokens_encoder:
a__: Any = token.encode('utf-8')
elif token in self.added_tokens_encoder:
a__: Union[str, Any] = token.encode('utf-8')
else:
                a__: Union[str, Any] = bytes([ord(token)])
bstring += tok_string
a__: str = bstring.decode('utf-8' , errors='ignore')
return string
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
return ()
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
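# GPT-2-style configuration for the Decision Transformer model: state/action dimensions plus the usual transformer hyperparameters.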
class __snake_case ( __lowerCAmelCase ):
a__ = """decision_transformer"""
a__ = ["""past_key_values"""]
a__ = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase=17 , lowercase=4 , lowercase=1_28 , lowercase=40_96 , lowercase=True , lowercase=1 , lowercase=10_24 , lowercase=3 , lowercase=1 , lowercase=None , lowercase="relu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=5_02_56 , lowercase=5_02_56 , lowercase=False , lowercase=False , **lowercase , ) -> Tuple:
'''simple docstring'''
a__: List[str] = state_dim
a__: int = act_dim
a__: List[Any] = hidden_size
a__: List[str] = max_ep_len
a__: List[Any] = action_tanh
a__: Optional[Any] = vocab_size
a__: Tuple = n_positions
a__: Dict = n_layer
a__: Optional[int] = n_head
a__: Optional[int] = n_inner
a__: Any = activation_function
a__: Union[str, Any] = resid_pdrop
a__: Any = embd_pdrop
a__: Any = attn_pdrop
a__: List[Any] = layer_norm_epsilon
a__: Optional[Any] = initializer_range
a__: Any = scale_attn_weights
a__: Dict = use_cache
a__: Optional[int] = scale_attn_by_inverse_layer_idx
a__: List[str] = reorder_and_upcast_attn
a__: Any = bos_token_id
a__: int = eos_token_id
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
| 290 | 1 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowercase__ = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
lowercase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowercase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
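# Per-config allow-list: attributes listed here may go unused in the modeling files without failing the check below.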
lowercase__ = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
    # used during training (even though we don't have training scripts for these models yet)
    'Mask2FormerConfig': ['ignore_value'],
    # `ignore_value` used during training (even though we don't have training scripts for these models yet)
    # `norm` used in conversion script (even though it is not used in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
a__: str = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'config.{attribute}' in modeling_source
or F'getattr(config, "{attribute}"' in modeling_source
or F'getattr(self.config, "{attribute}"' in modeling_source
):
a__: Dict = True
# Deal with multi-line cases
elif (
re.search(
rF'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , _SCREAMING_SNAKE_CASE , )
is not None
):
a__: str = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
a__: List[str] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
a__: Union[str, Any] = [
'bos_index',
'eos_index',
'pad_index',
'unk_index',
'mask_index',
'image_size',
'use_cache',
'out_features',
'out_indices',
]
a__: List[Any] = ['encoder_no_repeat_ngram_size']
# Special cases to be allowed
a__: Any = True
if not attribute_used:
a__: int = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
a__: Optional[Any] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
a__: str = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
a__: List[Any] = True
elif attribute.endswith('_token_id' ):
a__: Any = True
# configuration class specific cases
if not case_allowed:
a__: Optional[int] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
a__: Tuple = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def __a ( _SCREAMING_SNAKE_CASE ) ->Dict:
a__: List[str] = dict(inspect.signature(config_class.__init__ ).parameters )
a__: Any = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']]
a__: str = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
a__: Union[str, Any] = {}
if len(config_class.attribute_map ) > 0:
a__: int = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
a__: List[Any] = inspect.getsourcefile(_SCREAMING_SNAKE_CASE )
a__: Dict = os.path.dirname(_SCREAMING_SNAKE_CASE )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
a__: str = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for fn in os.listdir(_SCREAMING_SNAKE_CASE ) if fn.startswith('modeling_' )]
# Get the source code strings
a__: Tuple = []
for path in modeling_paths:
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE ) as fp:
modeling_sources.append(fp.read() )
a__: List[Any] = []
for config_param, default_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# `attributes` here is all the variant names for `config_param`
a__: Dict = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
unused_attributes.append(attributes[0] )
return sorted(_SCREAMING_SNAKE_CASE )
def __a ( ) ->List[str]:
a__: Any = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
a__: Optional[Any] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda _SCREAMING_SNAKE_CASE : inspect.isclass(_SCREAMING_SNAKE_CASE )
and issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and inspect.getmodule(_SCREAMING_SNAKE_CASE ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
a__: Tuple = check_config_attributes_being_used(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
a__: Any = unused_attributes
if len(_SCREAMING_SNAKE_CASE ) > 0:
a__: Optional[Any] = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
for name, attributes in configs_with_unused_attributes.items():
error += F'{name}: {attributes}\n'
raise ValueError(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
check_config_attributes()
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
while a != 0:
a__ , a__: List[str] = b % a, a
return b
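# Modular inverse of a modulo m via the extended Euclidean algorithm; raises ValueError when gcd(a, m) != 1.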
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
if gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) != 1:
a__: Dict = F'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: Union[str, Any] = 1, 0, a
a__ , a__ , a__: Any = 0, 1, m
while va != 0:
a__: int = ua // va
a__ , a__ , a__ , a__ , a__ , a__: Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 290 | 1 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
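# Example summaries (predictions and targets) reused across the ROUGE regression tests below.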
lowercase__ = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
lowercase__ = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def __a ( ) ->Optional[Any]:
a__: List[str] = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bootstrap_aggregation=_SCREAMING_SNAKE_CASE , rouge_keys=['rouge2', 'rougeL'] )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Any = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bootstrap_aggregation=_SCREAMING_SNAKE_CASE , rouge_keys=['rouge2'] )
assert (
pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean()
)
def __a ( ) ->Dict:
a__: List[Any] = 'rougeLsum'
a__: str = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , newline_sep=_SCREAMING_SNAKE_CASE , rouge_keys=[k] )[k]
a__: int = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , newline_sep=_SCREAMING_SNAKE_CASE , rouge_keys=[k] )[k]
assert score > score_no_sep
def __a ( ) ->Dict:
a__: List[Any] = ['rouge1', 'rouge2', 'rougeL']
a__: Optional[int] = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , newline_sep=_SCREAMING_SNAKE_CASE , rouge_keys=_SCREAMING_SNAKE_CASE )
a__: int = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , newline_sep=_SCREAMING_SNAKE_CASE , rouge_keys=_SCREAMING_SNAKE_CASE )
assert score_sep == score_no_sep
def __a ( ) ->Optional[int]:
a__: List[str] = [
'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
]
a__: List[str] = [
'Margot Frank, died in 1945, a month earlier than previously thought.',
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
' the final seconds on board Flight 9525.',
]
assert calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , newline_sep=_SCREAMING_SNAKE_CASE ) == calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , newline_sep=_SCREAMING_SNAKE_CASE )
def __a ( ) ->List[Any]:
a__: Dict = [
'" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
]
a__: Optional[int] = [
' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
]
a__: Optional[Any] = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rouge_keys=['rougeLsum'] , newline_sep=_SCREAMING_SNAKE_CASE )['rougeLsum']
a__: List[str] = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rouge_keys=['rougeLsum'] )['rougeLsum']
assert new_score > prev_score
def __a ( ) ->Tuple:
a__: Tuple = Path('examples/seq2seq/test_data/wmt_en_ro' )
a__: List[Any] = calculate_rouge_path(data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Dict = calculate_rouge_path(
data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) , bootstrap_aggregation=_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 290 | """simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase__ = logging.getLogger(__name__)
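# Distributed RAG retrieval with Ray: each remote worker holds its own retriever, and queries are routed to a randomly chosen actor.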
class __snake_case :
def __init__( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = False
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
if not self.initialized:
a__: Optional[int] = RagRetriever(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
a__: Optional[int] = True
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
self.retriever.index.init_index()
def lowerCamelCase_ ( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__ , a__: str = self.retriever._main_retrieve(lowercase , lowercase)
return doc_ids, retrieved_doc_embeds
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None) -> int:
'''simple docstring'''
if index is not None and index.is_initialized() and len(lowercase) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
super().__init__(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
a__: Any = retrieval_workers
if len(self.retrieval_workers) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase)
for worker in self.retrieval_workers
])
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
logger.info('initializing retrieval')
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def lowerCamelCase_ ( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
if len(self.retrieval_workers) > 0:
# Select a random retrieval actor.
a__: int = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
a__ , a__: List[Any] = ray.get(random_worker.retrieve.remote(lowercase , lowercase))
else:
a__ , a__: Dict = self._main_retrieve(lowercase , lowercase)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase)
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase=None , **lowercase) -> Tuple:
'''simple docstring'''
return super(lowercase , cls).get_tokenizers(lowercase , lowercase , **lowercase)
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase , lowercase=None , **lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[int] = kwargs.pop('config' , lowercase) or RagConfig.from_pretrained(lowercase , **lowercase)
a__: Union[str, Any] = RagTokenizer.from_pretrained(lowercase , config=lowercase)
a__: int = rag_tokenizer.question_encoder
a__: Any = rag_tokenizer.generator
if indexed_dataset is not None:
a__: List[Any] = 'custom'
a__: Optional[Any] = CustomHFIndex(config.retrieval_vector_size , lowercase)
else:
a__: Dict = cls._build_index(lowercase)
return cls(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
| 290 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
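# Tests for the CLIP BPE tokenizer: slow/fast parity, unicode and whitespace handling, and offset mappings.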
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = CLIPTokenizer
a__ = CLIPTokenizerFast
a__ = True
a__ = {}
a__ = False
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
a__: Tuple = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
a__: Optional[int] = dict(zip(lowercase , range(len(lowercase))))
a__: Optional[int] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
a__: Tuple = {'unk_token': '<unk>'}
a__: List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a__: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(lowercase) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(lowercase))
def lowerCamelCase_ ( self , **lowercase) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase)
def lowerCamelCase_ ( self , **lowercase) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase)
def lowerCamelCase_ ( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = 'lower newer'
a__: Tuple = 'lower newer'
return input_text, output_text
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Tuple = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a__: List[str] = 'lower newer'
a__: Optional[int] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
a__: Union[str, Any] = tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Tuple = tokens + [tokenizer.unk_token]
a__: List[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase) , lowercase)
@require_ftfy
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
a__: int = self.tokenizer_class.from_pretrained(lowercase , **lowercase)
a__: int = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase)
a__: Optional[int] = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
a__: int = tokenizer_s.tokenize(lowercase)
a__: Union[str, Any] = tokenizer_r.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a__: Any = 'xa\u0303y' + ' ' + 'x\xe3y'
a__: Any = tokenizer_s.tokenize(lowercase)
a__: List[str] = tokenizer_r.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
# Test that the tokenization is identical on unicode of space type
a__: Tuple = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
a__: Union[str, Any] = tokenizer_s.tokenize(lowercase)
a__: Optional[int] = tokenizer_r.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
# Test that the tokenization is identical on unicode of line break type
a__: List[Any] = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
a__: Optional[Any] = tokenizer_s.tokenize(lowercase)
a__: Any = tokenizer_r.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
a__: str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
a__: Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a__: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , )
a__: Union[str, Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase)
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase)))
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase) + 1, len(lowercase) + 1 + len(lowercase)) , )
a__: Any = f' {text}'
a__: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , )
a__: int = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase)
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase) + 1, 1 + len(lowercase) + 1 + len(lowercase)) , )
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
with self.assertRaises(lowercase) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.'))
@require_ftfy
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
pass
| 290 | """simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
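# Helpers for downloading GitHub Actions job/artifact data and aggregating CI test failures by error type and by model.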
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
a__: int = None
if token is not None:
a__: Tuple = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Optional[Any] = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
a__: str = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: str = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
a__: int = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: Dict = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Dict:
a__: Dict = None
if token is not None:
a__: List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
    a__: Dict = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: List[Any] = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
a__: Dict = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: Optional[int] = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: List[Any] = None
if token is not None:
a__: Optional[int] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = result.headers['Location']
a__: Optional[int] = requests.get(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
a__: int = os.path.join(_SCREAMING_SNAKE_CASE , F'{artifact_name}.zip' )
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fp:
fp.write(response.content )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
a__: List[Any] = []
a__: Optional[Any] = []
a__: List[Any] = None
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_SCREAMING_SNAKE_CASE ) as f:
for line in f:
a__: Optional[int] = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
a__: Union[str, Any] = line[: line.index(': ' )]
a__: Union[str, Any] = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
a__: Optional[int] = line[len('FAILED ' ) :]
failed_tests.append(_SCREAMING_SNAKE_CASE )
elif filename == "job_name.txt":
a__: Union[str, Any] = line
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
F'`errors` and `failed_tests` should have the same number of elements. Got {len(_SCREAMING_SNAKE_CASE )} for `errors` '
F'and {len(_SCREAMING_SNAKE_CASE )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
' problem.' )
a__: Tuple = None
if job_name and job_links:
a__: Dict = job_links.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# A list with elements of the form (line of error, error, failed test)
a__: int = [x + [y] + [job_link] for x, y in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return result
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->str:
a__: int = []
a__: Optional[int] = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for p in os.listdir(_SCREAMING_SNAKE_CASE ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_SCREAMING_SNAKE_CASE , job_links=_SCREAMING_SNAKE_CASE ) )
return errors
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Any:
a__: str = Counter()
counter.update([x[1] for x in logs] )
a__: int = counter.most_common()
a__: Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
a__: List[str] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
a__: Optional[Any] = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: List[str] = test.split('::' )[0]
if test.startswith('tests/models/' ):
a__: Dict = test.split('/' )[2]
else:
a__: Any = None
return test
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[str]:
a__: int = [(x[0], x[1], get_model(x[2] )) for x in logs]
a__: List[Any] = [x for x in logs if x[2] is not None]
a__: Optional[Any] = {x[2] for x in logs}
a__: Dict = {}
for test in tests:
a__: Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
a__: Union[str, Any] = counter.most_common()
a__: List[str] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
a__: List[Any] = sum(error_counts.values() )
if n_errors > 0:
a__: Any = {'count': n_errors, 'errors': error_counts}
a__: Optional[int] = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: Any = '| no. | error | status |'
a__: Any = '|-:|:-|:-|'
a__: str = [header, sep]
for error in reduced_by_error:
a__: int = reduced_by_error[error]['count']
a__: Tuple = F'| {count} | {error[:100]} | |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
a__: List[str] = '| model | no. of errors | major error | count |'
a__: str = '|-:|-:|-:|-:|'
a__: int = [header, sep]
for model in reduced_by_model:
a__: Tuple = reduced_by_model[model]['count']
a__ , a__: Dict = list(reduced_by_model[model]['errors'].items() )[0]
a__: Dict = F'| {model} | {count} | {error[:60]} | {_count} |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowercase__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowercase__ = get_job_links(args.workflow_run_id, token=args.token)
lowercase__ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowercase__ = k.find(' / ')
lowercase__ = k[index + len(' / ') :]
lowercase__ = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowercase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowercase__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowercase__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowercase__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowercase__ = reduce_by_error(errors)
lowercase__ = reduce_by_model(errors)
lowercase__ = make_github_table(reduced_by_error)
lowercase__ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 290 | 1 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
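# Unit tests for the 0/1 knapsack solver: empty/degenerate inputs and two small instances with known optima.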
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = 0
a__: Optional[int] = [0]
a__: Any = [0]
a__: int = len(lowercase)
self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase) , 0)
a__: Tuple = [60]
a__: Union[str, Any] = [10]
a__: Tuple = len(lowercase)
self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase) , 0)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Tuple = 3
a__: List[Any] = [1, 2, 3]
a__: Any = [3, 2, 1]
a__: List[Any] = len(lowercase)
self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase) , 5)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: str = 50
a__: List[str] = [60, 1_00, 1_20]
a__: Union[str, Any] = [10, 20, 30]
a__: str = len(lowercase)
self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase) , 2_20)
if __name__ == "__main__":
unittest.main()
| 290 | """simple docstring"""
import math
def __a ( _SCREAMING_SNAKE_CASE ) ->bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
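# Counts primes on the corner values of an expanding number spiral until their ratio falls below `ratio`, returning the side length at which that happens.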
def __a ( _SCREAMING_SNAKE_CASE = 0.1 ) ->int:
a__: str = 3
a__: Optional[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i)
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | 1 |
"""simple docstring"""
lowercase__ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
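# Vigenère cipher: each letter is shifted by the matching letter of a repeating key.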
def __a ( ) ->None:
a__: Optional[Any] = input('Enter message: ' )
a__: List[Any] = input('Enter key [alphanumeric]: ' )
a__: Union[str, Any] = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
a__: Any = 'encrypt'
a__: Optional[int] = encrypt_message(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif mode.lower().startswith('d' ):
a__: int = 'decrypt'
a__: Tuple = decrypt_message(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(F'\n{mode.title()}ed message:' )
print(_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
return translate_message(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'encrypt' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
return translate_message(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'decrypt' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: int = []
a__: Dict = 0
a__: List[Any] = key.upper()
for symbol in message:
a__: List[str] = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_SCREAMING_SNAKE_CASE )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_SCREAMING_SNAKE_CASE ):
a__: Optional[Any] = 0
else:
translated.append(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 290 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase__ = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 290 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowercase__ = 250004
lowercase__ = 250020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = MBartaaTokenizer
a__ = MBartaaTokenizerFast
a__ = True
a__ = True
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a__: Union[str, Any] = MBartaaTokenizer(lowercase , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Optional[int] = '<s>'
a__: Any = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , '<mask>')
self.assertEqual(len(lowercase) , 10_54)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_54)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: int = MBartaaTokenizer(lowercase , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=lowercase)
a__: List[str] = tokenizer.tokenize('This is a test')
self.assertListEqual(lowercase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
a__: str = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowercase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
a__: Any = tokenizer.convert_tokens_to_ids(lowercase)
self.assertListEqual(
lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
a__: str = tokenizer.convert_ids_to_tokens(lowercase)
self.assertListEqual(
lowercase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: int = {'input_ids': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
a__: str = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
a__: Optional[int] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase)
a__: Union[str, Any] = self.tokenizer_class.from_pretrained(lowercase , **lowercase)
a__: Optional[Any] = tempfile.mkdtemp()
a__: Tuple = tokenizer_r.save_pretrained(lowercase)
a__: Dict = tokenizer_p.save_pretrained(lowercase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
a__: List[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
self.assertSequenceEqual(lowercase , lowercase)
# Checks everything loads correctly in the same way
a__: str = tokenizer_r.from_pretrained(lowercase)
a__: List[Any] = tokenizer_p.from_pretrained(lowercase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase , lowercase))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase)
# Save tokenizer rust, legacy_format=True
a__: Dict = tempfile.mkdtemp()
a__: Dict = tokenizer_r.save_pretrained(lowercase , legacy_format=lowercase)
a__: Optional[int] = tokenizer_p.save_pretrained(lowercase)
# Checks it save with the same files
self.assertSequenceEqual(lowercase , lowercase)
# Checks everything loads correctly in the same way
a__: Union[str, Any] = tokenizer_r.from_pretrained(lowercase)
a__: Dict = tokenizer_p.from_pretrained(lowercase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase , lowercase))
shutil.rmtree(lowercase)
# Save tokenizer rust, legacy_format=False
a__: str = tempfile.mkdtemp()
a__: Optional[int] = tokenizer_r.save_pretrained(lowercase , legacy_format=lowercase)
a__: str = tokenizer_p.save_pretrained(lowercase)
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
a__: str = tokenizer_r.from_pretrained(lowercase)
a__: int = tokenizer_p.from_pretrained(lowercase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase , lowercase))
shutil.rmtree(lowercase)
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
a__ = """facebook/mbart-large-50-one-to-many-mmt"""
a__ = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
a__ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
a__ = [EN_CODE, 8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2]
@classmethod
def lowerCamelCase_ ( cls) -> List[str]:
'''simple docstring'''
a__: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO')
a__: Any = 1
return cls
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_00_01)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_00_04)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_00_20)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_00_38)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: int = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
self.assertIn(lowercase , self.tokenizer.all_special_ids)
a__: Union[str, Any] = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
a__: List[str] = self.tokenizer.decode(lowercase , skip_special_tokens=lowercase)
a__: Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase)
self.assertEqual(lowercase , lowercase)
self.assertNotIn(self.tokenizer.eos_token , lowercase)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , lowercase)
a__: str = 10
a__: str = self.tokenizer(lowercase , max_length=lowercase , truncation=lowercase).input_ids[0]
self.assertEqual(ids[0] , lowercase)
self.assertEqual(ids[-1] , 2)
self.assertEqual(len(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']) , [25_00_53, 25_00_01])
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Dict = tempfile.mkdtemp()
a__: str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase)
a__: Optional[Any] = MBartaaTokenizer.from_pretrained(lowercase)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase)
@require_torch
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase , return_tensors='pt')
a__: Union[str, Any] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Dict = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=len(self.expected_src_tokens) , return_tensors='pt' , )
a__: List[str] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id)
self.assertIsInstance(lowercase , lowercase)
self.assertEqual((2, 14) , batch.input_ids.shape)
self.assertEqual((2, 14) , batch.attention_mask.shape)
a__: List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase)
self.assertEqual(2 , batch.decoder_input_ids[0, 0]) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Tuple = self.tokenizer(self.src_text , padding=lowercase , truncation=lowercase , max_length=3 , return_tensors='pt')
a__: int = self.tokenizer(
text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=10 , return_tensors='pt')
a__: Any = targets['input_ids']
a__: List[str] = shift_tokens_right(lowercase , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Optional[int] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR')
self.assertEqual(
nested_simplify(lowercase) , {
# en_XX, A, test, EOS
'input_ids': [[25_00_04, 62, 30_34, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_00_01,
} , )
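# A minimal usage sketch (not part of the test class above) of what these tests
# exercise: the source language code is prepended to the encoder input and the
# target language id is passed as forced_bos_token_id at generation time. The
# class and attribute names below follow the released transformers API; the
# identifiers inside this file are placeholders.
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration
tokenizer = MBart50TokenizerFast.from_pretrained('facebook/mbart-large-50-one-to-many-mmt', src_lang='en_XX')
model = MBartForConditionalGeneration.from_pretrained('facebook/mbart-large-50-one-to-many-mmt')
inputs = tokenizer(' UN Chief Says There Is No Military Solution in Syria', return_tensors='pt')
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id['ro_RO'])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))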
| 290 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = KandinskyInpaintPipeline
a__ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
a__ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
a__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a__ = False
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
return 1_00
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[int] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
torch.manual_seed(0)
a__: Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
a__: Optional[Any] = MultilingualCLIP(lowercase)
a__: int = text_encoder.eval()
return text_encoder
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
a__: str = UNetaDConditionModel(**lowercase)
return model
@property
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = VQModel(**self.dummy_movq_kwargs)
return model
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Dict = self.dummy_text_encoder
a__: int = self.dummy_tokenizer
a__: str = self.dummy_unet
a__: Any = self.dummy_movq
a__: Tuple = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase , )
a__: Tuple = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase_ ( self , lowercase , lowercase=0) -> Any:
'''simple docstring'''
a__: List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase)).to(lowercase)
a__: int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowercase)
# create init_image
a__: Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase)).to(lowercase)
a__: int = image.cpu().permute(0 , 2 , 3 , 1)[0]
a__: Optional[int] = Image.fromarray(np.uinta(lowercase)).convert('RGB').resize((2_56, 2_56))
# create mask
a__: Tuple = np.ones((64, 64) , dtype=np.floataa)
a__: Optional[Any] = 0
if str(lowercase).startswith('mps'):
a__: str = torch.manual_seed(lowercase)
else:
a__: Dict = torch.Generator(device=lowercase).manual_seed(lowercase)
a__: Optional[int] = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = 'cpu'
a__: List[Any] = self.get_dummy_components()
a__: Optional[Any] = self.pipeline_class(**lowercase)
a__: str = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Optional[int] = pipe(**self.get_dummy_inputs(lowercase))
a__: List[str] = output.images
a__: int = pipe(
**self.get_dummy_inputs(lowercase) , return_dict=lowercase , )[0]
a__: Optional[Any] = image[0, -3:, -3:, -1]
a__: List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}')
assert image.shape == (1, 64, 64, 3)
a__: str = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy')
a__: int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
a__: Union[str, Any] = np.ones((7_68, 7_68) , dtype=np.floataa)
a__: int = 0
a__: Optional[int] = 'a hat'
a__: int = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa)
pipe_prior.to(lowercase)
a__: Any = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa)
a__: Optional[Any] = pipeline.to(lowercase)
pipeline.set_progress_bar_config(disable=lowercase)
a__: Dict = torch.Generator(device='cpu').manual_seed(0)
a__ , a__: Optional[Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
a__: List[str] = pipeline(
lowercase , image=lowercase , mask_image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
a__: str = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase , lowercase)
| 290 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_longt5'] = [
        'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LongT5EncoderModel',
        'LongT5ForConditionalGeneration',
        'LongT5Model',
        'LongT5PreTrainedModel',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_longt5'] = [
        'FlaxLongT5ForConditionalGeneration',
        'FlaxLongT5Model',
        'FlaxLongT5PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
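# Illustrative note: with the lazy structure above, a user-level import such as
# `from transformers import LongT5ForConditionalGeneration` only loads
# modeling_longt5 on first attribute access, keeping `import transformers` cheap
# when the torch or flax extras are not installed.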
| 290 | """simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowercase__ = []
lowercase__ = []
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
for attribute in key.split('.' ):
a__: str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
a__: List[str] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
a__: Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
a__: str = value
elif weight_type == "weight_g":
a__: int = value
elif weight_type == "weight_v":
a__: Tuple = value
elif weight_type == "bias":
a__: Dict = value
elif weight_type == "running_mean":
a__: Any = value
elif weight_type == "running_var":
a__: Tuple = value
elif weight_type == "num_batches_tracked":
a__: List[str] = value
elif weight_type == "weight_ih_l0":
a__: List[Any] = value
elif weight_type == "weight_hh_l0":
a__: List[Any] = value
elif weight_type == "bias_ih_l0":
a__: List[Any] = value
elif weight_type == "bias_hh_l0":
a__: List[Any] = value
elif weight_type == "weight_ih_l1":
a__: int = value
elif weight_type == "weight_hh_l1":
a__: str = value
elif weight_type == "bias_ih_l1":
a__: Union[str, Any] = value
elif weight_type == "bias_hh_l1":
a__: Any = value
else:
a__: Union[str, Any] = value
logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
a__ , a__: Optional[Any] = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
a__: List[Any] = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info(F'{name} was ignored' )
continue
a__: int = False
for key, mapped_key in MAPPING.items():
if "*" in key:
a__ , a__: str = key.split('.*.' )
if prefix in name and suffix in name:
a__: List[str] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
a__: List[str] = True
if "*" in mapped_key:
a__: List[str] = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
a__: str = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
a__: int = 'weight_g'
elif "weight_v" in name:
a__: Dict = 'weight_v'
elif "weight_ih_l0" in name:
a__: int = 'weight_ih_l0'
elif "weight_hh_l0" in name:
a__: Union[str, Any] = 'weight_hh_l0'
elif "bias_ih_l0" in name:
a__: Optional[Any] = 'bias_ih_l0'
elif "bias_hh_l0" in name:
a__: Optional[int] = 'bias_hh_l0'
elif "weight_ih_l1" in name:
a__: Dict = 'weight_ih_l1'
elif "weight_hh_l1" in name:
a__: Optional[Any] = 'weight_hh_l1'
elif "bias_ih_l1" in name:
a__: List[str] = 'bias_ih_l1'
elif "bias_hh_l1" in name:
a__: Optional[Any] = 'bias_hh_l1'
elif "bias" in name:
a__: List[str] = 'bias'
elif "weight" in name:
a__: Any = 'weight'
elif "running_mean" in name:
a__: Dict = 'running_mean'
elif "running_var" in name:
a__: Dict = 'running_var'
elif "num_batches_tracked" in name:
a__: Dict = 'num_batches_tracked'
else:
a__: List[str] = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F'Unused weights: {unused_weights}' )
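# Illustrative walk-through of the wildcard handling above for a single
# checkpoint key (the key comes from MAPPING_QUANTIZER; the layer index is made up):
#   name        = 'quantizer.vq.layers.3._codebook.embed'
#   key         = 'quantizer.vq.layers.*._codebook.embed'            # split on '.*.' into prefix/suffix
#   layer_index = name.split('_codebook.embed')[0].split('.')[-2]    # -> '3'
#   hf_name     = 'quantizer.layers.*.codebook.embed'.replace('*', layer_index)
#   # -> 'quantizer.layers.3.codebook.embed', which set_recursively then assigns.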
@torch.no_grad()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) ->int:
if config_path is not None:
a__: Dict = EncodecConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
a__: Tuple = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
a__: Any = [8, 5, 4, 4]
a__: List[str] = [2.2]
a__: List[Any] = 64
a__: Dict = 32000
a__: Union[str, Any] = 2048
a__: Union[str, Any] = False
a__: Any = False
a__: Optional[Any] = False
elif model_name == "encodec_48khz":
a__: Optional[int] = [8, 5, 4, 2]
a__: Union[str, Any] = [3.0, 6.0, 12.0, 24.0]
a__: List[str] = 48000
a__: Tuple = 2
a__: Optional[Any] = False
a__: Optional[int] = 'time_group_norm'
a__: Union[str, Any] = True
a__: Dict = 1.0
a__: str = 0.01
else:
raise ValueError(F'Unknown model name: {model_name}' )
a__: Optional[int] = EncodecModel(_SCREAMING_SNAKE_CASE )
a__: List[str] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
a__: int = torch.load(_SCREAMING_SNAKE_CASE )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
a__: str = original_checkpoint['best_state']
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
lowercase__ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
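# Example invocation (the script file name and local paths are illustrative
# placeholders, not taken from this repository):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf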
| 290 | 1 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowercase__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __snake_case ( __lowerCAmelCase ):
a__ = VOCAB_FILES_NAMES
a__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
a__ = DPRContextEncoderTokenizer
class __snake_case ( __lowerCAmelCase ):
a__ = VOCAB_FILES_NAMES
a__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a__ = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
lowercase__ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(__lowerCAmelCase )
class __snake_case :
def __call__( self , lowercase , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , lowercase = None , **lowercase , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , return_tensors=lowercase , return_attention_mask=lowercase , **lowercase , )
elif titles is None or texts is None:
a__: Dict = titles if texts is None else texts
return super().__call__(
lowercase , lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , return_tensors=lowercase , return_attention_mask=lowercase , **lowercase , )
a__: List[Any] = titles if not isinstance(lowercase , lowercase) else [titles]
a__: str = texts if not isinstance(lowercase , lowercase) else [texts]
a__: Any = len(lowercase)
a__: Tuple = questions if not isinstance(lowercase , lowercase) else [questions] * n_passages
assert len(lowercase) == len(
        lowercase), f'There should be as many titles as texts but got {len(lowercase)} titles and {len(lowercase)} texts.'
a__: Tuple = super().__call__(lowercase , lowercase , padding=lowercase , truncation=lowercase)['input_ids']
a__: int = super().__call__(lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase)['input_ids']
a__: Union[str, Any] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowercase , lowercase)
]
}
if return_attention_mask is not False:
a__: List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
a__: Optional[int] = attention_mask
return self.pad(lowercase , padding=lowercase , max_length=lowercase , return_tensors=lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase = 16 , lowercase = 64 , lowercase = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
a__: Union[str, Any] = reader_input['input_ids']
a__ , a__ , a__: Dict = reader_output[:3]
a__: Any = len(lowercase)
a__: List[str] = sorted(range(lowercase) , reverse=lowercase , key=relevance_logits.__getitem__)
a__: List[DPRReaderOutput] = []
for doc_id in sorted_docs:
a__: Dict = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
a__: Union[str, Any] = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
a__: Optional[int] = sequence_ids.index(self.pad_token_id)
else:
a__: str = len(lowercase)
a__: Optional[int] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase , top_spans=lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase , start_index=lowercase , end_index=lowercase , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(lowercase) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
a__: List[str] = []
for start_index, start_score in enumerate(lowercase):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
        a__: Dict = sorted(lowercase , key=lambda x: x[1] , reverse=lowercase)
a__: Union[str, Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
a__: List[Any] = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowercase) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__lowerCAmelCase )
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
a__ = VOCAB_FILES_NAMES
a__ = READER_PRETRAINED_VOCAB_FILES_MAP
a__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = READER_PRETRAINED_INIT_CONFIGURATION
a__ = ["""input_ids""", """attention_mask"""]
a__ = DPRReaderTokenizer
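# A minimal usage sketch of the fast reader tokenizer defined above, mirroring the
# upstream transformers example (the checkpoint names are the public ones from the
# URL maps; in the released library the span-decoding method is decode_best_spans,
# whereas the method names in this file are placeholders):
from transformers import DPRReader, DPRReaderTokenizerFast
tokenizer = DPRReaderTokenizerFast.from_pretrained('facebook/dpr-reader-single-nq-base')
model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base')
encoded_inputs = tokenizer(
    questions=['What is love ?'],
    titles=['Haddaway'],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors='pt',
)
outputs = model(**encoded_inputs)
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)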
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
if height >= 1:
move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
move_disk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
print('moving disk from' , _SCREAMING_SNAKE_CASE , 'to' , _SCREAMING_SNAKE_CASE )
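# The placeholder arguments above hide the pole ordering that makes the recursion
# work; a self-contained reconstruction (names chosen for illustration, not taken
# from this file) looks like this:
def move_tower_example(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    if height >= 1:
        move_tower_example(height - 1, from_pole, with_pole, to_pole)
        print('moving disk from', from_pole, 'to', to_pole)
        move_tower_example(height - 1, with_pole, to_pole, from_pole)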
def __a ( ) ->List[str]:
a__: Dict = int(input('Height of hanoi: ' ).strip() )
move_tower(_SCREAMING_SNAKE_CASE , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
| 290 | 1 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class __snake_case :
def __init__( self , lowercase) -> int:
'''simple docstring'''
a__: Dict = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
a__: List[Any] = len(lowercase) - 1
def lowerCamelCase_ ( self , lowercase) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
a__: list[float] = []
for i in range(len(self.list_of_points)):
# basis function for each i
output_values.append(
comb(self.degree , lowercase) * ((1 - t) ** (self.degree - i)) * (t**i))
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(lowercase) , 5) == 1
return output_values
def lowerCamelCase_ ( self , lowercase) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
a__: Tuple = self.basis_function(lowercase)
a__: List[str] = 0.0
a__: List[Any] = 0.0
for i in range(len(self.list_of_points)):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
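    # Worked example: for the quadratic curve with control points (0, 0), (5, 5)
    # and (5, 0), the basis functions at t = 0.5 are [0.25, 0.5, 0.25], so this
    # method returns (0.25 * 0 + 0.5 * 5 + 0.25 * 5, 0.25 * 0 + 0.5 * 5 + 0.25 * 0),
    # i.e. (3.75, 2.5).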
def lowerCamelCase_ ( self , lowercase = 0.01) -> str:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
a__: list[float] = [] # x coordinates of points to plot
a__: list[float] = [] # y coordinates of points to plot
a__: List[str] = 0.0
while t <= 1:
a__: Union[str, Any] = self.bezier_curve_function(lowercase)
to_plot_x.append(value[0])
to_plot_y.append(value[1])
t += step_size
a__: Dict = [i[0] for i in self.list_of_points]
a__: Optional[Any] = [i[1] for i in self.list_of_points]
plt.plot(
lowercase , lowercase , color='blue' , label='Curve of Degree ' + str(self.degree) , )
plt.scatter(lowercase , lowercase , color='red' , label='Control Points')
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->str:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: Optional[int] = F'Expected string as input, found {type(_SCREAMING_SNAKE_CASE )}'
raise ValueError(_SCREAMING_SNAKE_CASE )
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: List[str] = F'Expected boolean as use_pascal parameter, found {type(_SCREAMING_SNAKE_CASE )}'
raise ValueError(_SCREAMING_SNAKE_CASE )
a__: int = input_str.split('_' )
a__: List[str] = 0 if use_pascal else 1
a__: List[str] = words[start_index:]
a__: List[str] = [word[0].upper() + word[1:] for word in words_to_capitalize]
a__: List[str] = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
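# Worked examples for the function above: with input 'some_random_string' it
# returns 'someRandomString' when use_pascal is False and 'SomeRandomString'
# when use_pascal is True.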
if __name__ == "__main__":
from doctest import testmod
testmod()
| 290 | 1 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
a__: Optional[int] = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
a__ , a__: List[Any] = True, True
a__: Optional[Any] = dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return path
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: Optional[int] = 0
a__: List[str] = -1
for i in range(_SCREAMING_SNAKE_CASE ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
a__: Union[str, Any] = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
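# Return-code convention used by the caller below: 1 means every vertex has even
# degree (an Euler circuit exists), 2 means exactly two vertices have odd degree
# (only an Euler path exists, starting from the odd-degree vertex returned as
# odd_node), and 3 means neither a circuit nor a path exists.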
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: Optional[int] = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
a__ , a__: Union[str, Any] = check_circuit_or_path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if check == 3:
print('graph is not Eulerian' )
print('no path' )
return
a__: List[Any] = 1
if check == 2:
a__: Tuple = odd_node
print('graph has a Euler path' )
if check == 1:
print('graph has a Euler cycle' )
a__: Union[str, Any] = dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(_SCREAMING_SNAKE_CASE )
def __a ( ) ->List[Any]:
a__: Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
a__: int = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
a__: List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
a__: Optional[int] = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
a__: Optional[int] = {
1: [],
2: []
# all degree is zero
}
a__: Optional[Any] = 10
check_euler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_euler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_euler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_euler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_euler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 290 | """simple docstring"""
class __snake_case :
def __init__( self , lowercase , lowercase=None , lowercase=None) -> List[str]:
'''simple docstring'''
a__: Dict = data
a__: List[Any] = previous
a__: Any = next_node
def __str__( self) -> str:
'''simple docstring'''
return f'{self.data}'
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return self.data
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return self.next
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return self.previous
class __snake_case :
def __init__( self , lowercase) -> Dict:
'''simple docstring'''
a__: List[Any] = head
def __iter__( self) -> List[Any]:
'''simple docstring'''
return self
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
if not self.current:
raise StopIteration
else:
a__: Dict = self.current.get_data()
a__: Optional[Any] = self.current.get_next()
return value
class __snake_case :
def __init__( self) -> Dict:
'''simple docstring'''
a__: List[Any] = None # First node in list
a__: Optional[int] = None # Last node in list
def __str__( self) -> Optional[Any]:
'''simple docstring'''
a__: Dict = self.head
a__: Optional[Any] = []
while current is not None:
nodes.append(current.get_data())
a__: str = current.get_next()
return " ".join(str(lowercase) for node in nodes)
def __contains__( self , lowercase) -> Optional[int]:
'''simple docstring'''
a__: Optional[int] = self.head
while current:
if current.get_data() == value:
return True
a__: Dict = current.get_next()
return False
def __iter__( self) -> int:
'''simple docstring'''
return LinkedListIterator(self.head)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
if self.head is None:
a__: Optional[Any] = node
a__: Optional[Any] = node
else:
self.insert_before_node(self.head , lowercase)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
if self.head is None:
self.set_head(lowercase)
else:
self.insert_after_node(self.tail , lowercase)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: Tuple = Node(lowercase)
if self.head is None:
self.set_head(lowercase)
else:
self.set_tail(lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Union[str, Any] = node
a__: Optional[Any] = node.previous
if node.get_previous() is None:
a__: Tuple = node_to_insert
else:
a__: int = node_to_insert
a__: Optional[int] = node_to_insert
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Optional[int] = node
a__: Tuple = node.next
if node.get_next() is None:
a__: Optional[int] = node_to_insert
else:
a__: Any = node_to_insert
a__: str = node_to_insert
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Any = 1
a__: Tuple = Node(lowercase)
a__: Tuple = self.head
while node:
if current_position == position:
self.insert_before_node(lowercase , lowercase)
return
current_position += 1
a__: List[Any] = node.next
self.insert_after_node(self.tail , lowercase)
def lowerCamelCase_ ( self , lowercase) -> Node:
'''simple docstring'''
a__: Tuple = self.head
while node:
if node.get_data() == item:
return node
a__: List[str] = node.get_next()
raise Exception('Node not found')
def lowerCamelCase_ ( self , lowercase) -> Any:
'''simple docstring'''
if (node := self.get_node(lowercase)) is not None:
if node == self.head:
a__: Any = self.head.get_next()
if node == self.tail:
a__: List[Any] = self.tail.get_previous()
self.remove_node_pointers(lowercase)
@staticmethod
def lowerCamelCase_ ( lowercase) -> None:
'''simple docstring'''
if node.get_next():
a__: Any = node.previous
if node.get_previous():
a__: List[str] = node.next
a__: int = None
a__: Union[str, Any] = None
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return self.head is None
def __a ( ) ->None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | 1 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
lowercase__ = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def __a ( _SCREAMING_SNAKE_CASE ) ->set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
a__: set[int] = set()
a__: int
a__: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
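# Worked example (illustrative): partition(5) == {5, 6}. The prime partitions of
# 5 are [5] and [2, 3]; each is encoded as the product of its parts (5 and 6),
# and unique factorisation guarantees distinct partitions give distinct products,
# so len(partition(n)) counts the ways to write n as a sum of primes.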
def __a ( _SCREAMING_SNAKE_CASE = 5000 ) ->int | None:
for number_to_partition in range(1 , _SCREAMING_SNAKE_CASE ):
if len(partition(_SCREAMING_SNAKE_CASE ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"{solution() = }")
| 290 | """simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __snake_case ( __lowerCAmelCase ):
a__ = 42
a__ = jnp.floataa
a__ = True
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
super().setup()
a__: int = nn.Dense(5 , dtype=self.dtype)
def __call__( self , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
a__: Dict = super().__call__(*lowercase , **lowercase)
a__: str = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class __snake_case ( __lowerCAmelCase ):
a__ = FlaxBigBirdForNaturalQuestionsModule
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
def cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
a__: Any = logits.shape[-1]
a__: List[Any] = (labels[..., None] == jnp.arange(_SCREAMING_SNAKE_CASE )[None]).astype('f4' )
a__: List[str] = jax.nn.log_softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
a__: Dict = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
a__: str = reduction(_SCREAMING_SNAKE_CASE )
return loss
a__: Tuple = partial(_SCREAMING_SNAKE_CASE , reduction=jnp.mean )
a__: List[str] = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Union[str, Any] = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Any = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __snake_case :
a__ = "google/bigbird-roberta-base"
a__ = 3000
a__ = 1_0500
a__ = 128
a__ = 3
a__ = 1
a__ = 5
# tx_args
a__ = 3e-5
a__ = 0.0
a__ = 2_0000
a__ = 0.0095
a__ = "bigbird-roberta-natural-questions"
a__ = "training-expt"
a__ = "data/nq-training.jsonl"
a__ = "data/nq-validation.jsonl"
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=lowercase)
a__: str = os.path.join(self.base_dir , self.save_dir)
a__: List[str] = self.batch_size_per_device * jax.device_count()
@dataclass
class __snake_case :
a__ = 42
a__ = 4096 # no dynamic padding on TPUs
def __call__( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: int = self.collate_fn(lowercase)
a__: Optional[int] = jax.tree_util.tree_map(lowercase , lowercase)
return batch
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__ , a__: Dict = self.fetch_inputs(features['input_ids'])
a__: List[Any] = {
'input_ids': jnp.array(lowercase , dtype=jnp.intaa),
'attention_mask': jnp.array(lowercase , dtype=jnp.intaa),
'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa),
'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa),
'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa),
}
return batch
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
        a__: List[Any] = [self._fetch_inputs(ids) for ids in input_ids]
return zip(*lowercase)
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__: Union[str, Any] = [1 for _ in range(len(lowercase))]
while len(lowercase) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
if seed is not None:
a__: int = dataset.shuffle(seed=_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) // batch_size ):
a__: Union[str, Any] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_SCREAMING_SNAKE_CASE )
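# Illustrative: with batch_size=2 this generator yields len(dataset) // 2
# dictionaries, each holding two consecutive rows for every column of the
# (optionally shuffled) dataset; an incomplete trailing batch is dropped by the
# integer division above.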
@partial(jax.pmap , axis_name='batch' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) ->Any:
def loss_fn(_SCREAMING_SNAKE_CASE ):
a__: str = model_inputs.pop('start_labels' )
a__: Dict = model_inputs.pop('end_labels' )
a__: Optional[int] = model_inputs.pop('pooled_labels' )
a__: Optional[Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , dropout_rng=_SCREAMING_SNAKE_CASE , train=_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: Optional[int] = outputs
return state.loss_fn(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
a__ , a__: Union[str, Any] = jax.random.split(_SCREAMING_SNAKE_CASE )
a__: List[Any] = jax.value_and_grad(_SCREAMING_SNAKE_CASE )
a__ , a__: str = grad_fn(state.params )
a__: Optional[int] = jax.lax.pmean({'loss': loss} , axis_name='batch' )
a__: int = jax.lax.pmean(_SCREAMING_SNAKE_CASE , 'batch' )
a__: Union[str, Any] = state.apply_gradients(grads=_SCREAMING_SNAKE_CASE )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch' )
def __a ( _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) ->Optional[Any]:
a__: Optional[int] = model_inputs.pop('start_labels' )
a__: int = model_inputs.pop('end_labels' )
a__: Dict = model_inputs.pop('pooled_labels' )
a__: Union[str, Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=state.params , train=_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: int = outputs
a__: Optional[int] = state.loss_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Tuple = jax.lax.pmean({'loss': loss} , axis_name='batch' )
return metrics
class __snake_case ( train_state.TrainState ):
a__ = struct.field(pytree_node=__lowerCAmelCase )
@dataclass
class __snake_case :
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = None
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None) -> Optional[int]:
'''simple docstring'''
a__: Dict = model.params
a__: Any = TrainState.create(
apply_fn=model.__call__ , params=lowercase , tx=lowercase , loss_fn=lowercase , )
if ckpt_dir is not None:
a__ , a__ , a__ , a__ , a__: Any = restore_checkpoint(lowercase , lowercase)
a__: Any = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
a__ , a__: str = build_tx(**lowercase)
a__: Optional[Any] = train_state.TrainState(
step=lowercase , apply_fn=model.__call__ , params=lowercase , tx=lowercase , opt_state=lowercase , )
a__: int = args
a__: Union[str, Any] = data_collator
a__: Any = lr
a__: Dict = params
a__: Tuple = jax_utils.replicate(lowercase)
return state
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__: int = self.args
a__: str = len(lowercase) // args.batch_size
a__: Tuple = jax.random.PRNGKey(0)
a__: List[Any] = jax.random.split(lowercase , jax.device_count())
for epoch in range(args.max_epochs):
a__: str = jnp.array(0 , dtype=jnp.floataa)
a__: Tuple = get_batched_dataset(lowercase , args.batch_size , seed=lowercase)
a__: Optional[int] = 0
for batch in tqdm(lowercase , total=lowercase , desc=f'Running EPOCH-{epoch}'):
a__: List[str] = self.data_collator(lowercase)
a__ , a__ , a__: int = self.train_step_fn(lowercase , lowercase , **lowercase)
running_loss += jax_utils.unreplicate(metrics['loss'])
i += 1
if i % args.logging_steps == 0:
a__: List[Any] = jax_utils.unreplicate(state.step)
a__: Tuple = running_loss.item() / i
a__: Optional[Any] = self.scheduler_fn(state_step - 1)
a__: List[Any] = self.evaluate(lowercase , lowercase)
a__: List[str] = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(lowercase))
self.logger.log(lowercase , commit=lowercase)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__: Tuple = get_batched_dataset(lowercase , self.args.batch_size)
a__: Dict = len(lowercase) // self.args.batch_size
a__: Tuple = jnp.array(0 , dtype=jnp.floataa)
a__: List[Any] = 0
for batch in tqdm(lowercase , total=lowercase , desc='Evaluating ... '):
a__: str = self.data_collator(lowercase)
a__: List[str] = self.val_step_fn(lowercase , **lowercase)
running_loss += jax_utils.unreplicate(metrics['loss'])
i += 1
return running_loss / i
def lowerCamelCase_ ( self , lowercase , lowercase) -> Any:
'''simple docstring'''
a__: List[Any] = jax_utils.unreplicate(lowercase)
print(f'SAVING CHECKPOINT IN {save_dir}' , end=' ... ')
self.model_save_fn(lowercase , params=state.params)
with open(os.path.join(lowercase , 'opt_state.msgpack') , 'wb') as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(lowercase , 'args.joblib'))
joblib.dump(self.data_collator , os.path.join(lowercase , 'data_collator.joblib'))
with open(os.path.join(lowercase , 'training_state.json') , 'w') as f:
json.dump({'step': state.step.item()} , lowercase)
print('DONE')
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=' ... ' )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'flax_model.msgpack' ) , 'rb' ) as f:
a__: int = from_bytes(state.params , f.read() )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'opt_state.msgpack' ) , 'rb' ) as f:
a__: Optional[Any] = from_bytes(state.opt_state , f.read() )
a__: Optional[Any] = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , 'args.joblib' ) )
a__: int = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , 'data_collator.joblib' ) )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'training_state.json' ) , 'r' ) as f:
a__: Any = json.load(_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
a__: str = num_train_steps - warmup_steps
a__: str = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=_SCREAMING_SNAKE_CASE , transition_steps=_SCREAMING_SNAKE_CASE )
a__: List[Any] = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=1e-7 , transition_steps=_SCREAMING_SNAKE_CASE )
a__: int = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
def weight_decay_mask(_SCREAMING_SNAKE_CASE ):
a__: List[Any] = traverse_util.flatten_dict(_SCREAMING_SNAKE_CASE )
a__: List[str] = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(_SCREAMING_SNAKE_CASE )
a__: List[str] = scheduler_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Any = optax.adamw(learning_rate=_SCREAMING_SNAKE_CASE , weight_decay=_SCREAMING_SNAKE_CASE , mask=_SCREAMING_SNAKE_CASE )
return tx, lr
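# Illustrative wiring of the helpers above (the numbers are hypothetical; the call
# site earlier in this file, build_tx(**...), takes its values from an `args` object):
# tx, lr_schedule = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=1_000,
#                            num_train_steps=10_000, weight_decay=1e-2)
# `tx` is an optax.adamw transform whose weight-decay mask skips biases and LayerNorm
# scales, and `lr_schedule` is the joined warmup/decay schedule built just above.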
| 290 | 1 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
lowercase__ = pytest.mark.integration
lowercase__ = {'comet'}
lowercase__ = importlib.util.find_spec('fairseq') is not None
lowercase__ = {'code_eval'}
lowercase__ = os.name == 'nt'
lowercase__ = {'bertscore', 'frugalscore', 'perplexity'}
lowercase__ = importlib.util.find_spec('transformers') is not None
def __a ( _SCREAMING_SNAKE_CASE ) ->List[Any]:
@wraps(_SCREAMING_SNAKE_CASE )
def wrapper(self , _SCREAMING_SNAKE_CASE ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('"test requires Fairseq"' )
else:
test_case(self , _SCREAMING_SNAKE_CASE )
return wrapper
def __a ( _SCREAMING_SNAKE_CASE ) ->Optional[int]:
@wraps(_SCREAMING_SNAKE_CASE )
def wrapper(self , _SCREAMING_SNAKE_CASE ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('"test requires transformers"' )
else:
test_case(self , _SCREAMING_SNAKE_CASE )
return wrapper
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
@wraps(_SCREAMING_SNAKE_CASE )
def wrapper(self , _SCREAMING_SNAKE_CASE ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('"test not supported on Windows"' )
else:
test_case(self , _SCREAMING_SNAKE_CASE )
return wrapper
def __a ( ) ->List[Any]:
a__: Optional[int] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@local
class __snake_case ( parameterized.TestCase ):
a__ = {}
a__ = None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning')
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__: Optional[Any] = '[...]'
a__: Optional[int] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , lowercase)).module_path)
a__: int = datasets.load.import_main_class(metric_module.__name__ , dataset=lowercase)
# check parameters
a__: Tuple = inspect.signature(metric._compute).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs
# run doctest
with self.patch_intensive_calls(lowercase , metric_module.__name__):
with self.use_local_metrics():
try:
a__: int = doctest.testmod(lowercase , verbose=lowercase , raise_on_error=lowercase)
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@slow
def lowerCamelCase_ ( self , lowercase) -> Any:
'''simple docstring'''
a__: Tuple = '[...]'
a__: str = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , lowercase)).module_path)
# run doctest
with self.use_local_metrics():
a__: str = doctest.testmod(lowercase , verbose=lowercase , raise_on_error=lowercase)
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@contextmanager
def lowerCamelCase_ ( self , lowercase , lowercase) -> Tuple:
'''simple docstring'''
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](lowercase):
yield
else:
yield
@contextmanager
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
def load_local_metric(lowercase , *lowercase , **lowercase):
return load_metric(os.path.join('metrics' , lowercase) , *lowercase , **lowercase)
with patch('datasets.load_metric') as mock_load_metric:
a__: Tuple = load_local_metric
yield
@classmethod
def lowerCamelCase_ ( cls , lowercase) -> int:
'''simple docstring'''
def wrapper(lowercase):
a__: Union[str, Any] = contextmanager(lowercase)
a__: Optional[Any] = patcher
return patcher
return wrapper
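# The classmethod above registers a patcher per metric name; during the doctest runs,
# patch_intensive_calls swaps in the mocks defined below (bleurt, bertscore, comet) so
# that no checkpoints are downloaded and no real model forward passes are executed.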
@LocalMetricTest.register_intensive_calls_patcher('bleurt' )
def __a ( _SCREAMING_SNAKE_CASE ) ->Tuple:
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags
class __snake_case ( __lowerCAmelCase ):
def lowerCamelCase_ ( self , lowercase) -> Optional[Any]:
'''simple docstring'''
assert len(input_dict['input_ids']) == 2
return np.array([1.03, 1.04])
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('bleurt.score._create_predictor' ) as mock_create_predictor:
a__: int = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore' )
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
import torch
def bert_cos_score_idf(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(_SCREAMING_SNAKE_CASE ) )
# mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('bert_score.scorer.get_model' ), patch(
'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf:
a__: Union[str, Any] = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('comet' )
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
def load_from_checkpoint(_SCREAMING_SNAKE_CASE ):
class __snake_case :
def lowerCamelCase_ ( self , lowercase , *lowercase , **lowercase) -> Tuple:
'''simple docstring'''
assert len(lowercase) == 2
a__: Dict = [0.19, 0.92]
return scores, sum(lowercase) / len(lowercase)
return Model()
# mock load_from_checkpoint which is supposed to download a COMET model
with patch('comet.download_model' ) as mock_download_model:
a__: Dict = None
with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint:
a__: Tuple = load_from_checkpoint
yield
def __a ( ) ->Dict:
a__: Optional[Any] = load_metric(os.path.join('metrics' , 'seqeval' ) )
a__: Union[str, Any] = 'ERROR'
a__: List[Any] = F'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'
with pytest.raises(_SCREAMING_SNAKE_CASE , match=re.escape(_SCREAMING_SNAKE_CASE ) ):
metric.compute(predictions=[] , references=[] , scheme=_SCREAMING_SNAKE_CASE )
| 290 | """simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowercase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return image
elif isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
a__: Optional[int] = [image]
a__: str = [trans(img.convert('RGB' ) ) for img in image]
a__: Any = torch.stack(_SCREAMING_SNAKE_CASE )
return image
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
a__: Dict = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=lowercase , scheduler=lowercase)
def lowerCamelCase_ ( self , lowercase) -> int:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}')
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: int = min(int(num_inference_steps * strength) , lowercase)
a__: Any = max(num_inference_steps - init_timestep , 0)
a__: Union[str, Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None) -> List[Any]:
'''simple docstring'''
if not isinstance(lowercase , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase)}')
a__: Tuple = image.to(device=lowercase , dtype=lowercase)
if isinstance(lowercase , lowercase) and len(lowercase) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(lowercase)}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
a__: List[str] = init_latents.shape
a__: List[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase)
# get latents
print('add noise to latents at timestep' , lowercase)
a__: int = self.scheduler.add_noise(lowercase , lowercase , lowercase)
a__: Dict = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase = None , lowercase = 0.8 , lowercase = 1 , lowercase = None , lowercase = 0.0 , lowercase = 50 , lowercase = None , lowercase = "pil" , lowercase = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowercase)
# 2. Preprocess image
a__: Tuple = preprocess(lowercase)
# 3. set timesteps
self.scheduler.set_timesteps(lowercase , device=self.device)
a__ , a__: Union[str, Any] = self.get_timesteps(lowercase , lowercase , self.device)
a__: Optional[int] = timesteps[:1].repeat(lowercase)
# 4. Prepare latent variables
a__: Union[str, Any] = self.prepare_latents(lowercase , lowercase , lowercase , self.unet.dtype , self.device , lowercase)
a__: Optional[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase):
# 1. predict noise model_output
a__: Dict = self.unet(lowercase , lowercase).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
a__: Optional[Any] = self.scheduler.step(
lowercase , lowercase , lowercase , eta=lowercase , use_clipped_model_output=lowercase , generator=lowercase , ).prev_sample
a__: Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1)
a__: Optional[int] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
a__: Dict = self.numpy_to_pil(lowercase)
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase)
| 290 | 1 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = None
a__ = BloomTokenizerFast
a__ = BloomTokenizerFast
a__ = True
a__ = False
a__ = """tokenizer_file"""
a__ = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
super().setUp()
a__: int = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowercase)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Union[str, Any] = self.get_rust_tokenizer()
a__: List[str] = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
a__: List[Any] = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
a__: Any = tokenizer.batch_encode_plus(lowercase)['input_ids']
self.assertListEqual(lowercase , lowercase)
a__: int = tokenizer.batch_decode(lowercase)
self.assertListEqual(lowercase , lowercase)
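# Padding behaviour check: encoding single and paired inputs with a max_length must
# not raise, but once the pad token is cleared (the "hotfix" assignment below) every
# call that requests padding='max_length' is expected to raise.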
def lowerCamelCase_ ( self , lowercase=6) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
a__: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
a__: Dict = 'This is a simple input'
a__: Union[str, Any] = ['This is a simple input 1', 'This is a simple input 2']
a__: Optional[Any] = ('This is a simple input', 'This is a pair')
a__: str = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(lowercase , max_length=lowercase)
tokenizer_r.encode_plus(lowercase , max_length=lowercase)
tokenizer_r.batch_encode_plus(lowercase , max_length=lowercase)
tokenizer_r.encode(lowercase , max_length=lowercase)
tokenizer_r.batch_encode_plus(lowercase , max_length=lowercase)
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding')
a__: Optional[int] = None # Hotfixing padding = None
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding='max_length')
# Simple input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding='max_length')
# Simple input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , )
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding='max_length')
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding='max_length')
# Pair input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , )
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = self.get_rust_tokenizer()
a__: Optional[Any] = load_dataset('xnli' , 'all_languages' , split='test' , streaming=lowercase)
a__: Any = next(iter(lowercase))['premise'] # pick one example
a__: List[str] = list(sample_data.values())
a__: List[str] = list(map(tokenizer.encode , lowercase))
a__: Union[str, Any] = [tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase) for x in output_tokens]
self.assertListEqual(lowercase , lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
| 290 | """simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Optional[Any] = tempfile.mkdtemp()
a__: Optional[int] = SamImageProcessor()
a__: Tuple = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> List[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__: Optional[int] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__: List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Union[str, Any] = self.get_image_processor()
a__: List[Any] = SamProcessor(image_processor=lowercase)
a__: Optional[int] = self.prepare_image_inputs()
a__: Optional[Any] = image_processor(lowercase , return_tensors='np')
a__: Tuple = processor(images=lowercase , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
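# The post-processing tests below feed tiny (1, 3, 5, 5) mask logits through
# processor.post_process_masks and expect them to be resized back to the original
# image resolution (1, 3, 1764, 2646); the original sizes may be passed as Python
# lists, framework tensors or numpy arrays, and a malformed 2-D input should raise.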
@require_torch
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: int = self.get_image_processor()
a__: List[str] = SamProcessor(image_processor=lowercase)
a__: Optional[Any] = [torch.ones((1, 3, 5, 5))]
a__: Union[str, Any] = [[17_64, 26_46]]
a__: Optional[Any] = [[6_83, 10_24]]
a__: int = processor.post_process_masks(lowercase , lowercase , lowercase)
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Optional[int] = processor.post_process_masks(
lowercase , torch.tensor(lowercase) , torch.tensor(lowercase))
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
# should also work with np
a__: Dict = [np.ones((1, 3, 5, 5))]
a__: Tuple = processor.post_process_masks(lowercase , np.array(lowercase) , np.array(lowercase))
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Tuple = [[1, 0], [0, 1]]
with self.assertRaises(lowercase):
a__: List[Any] = processor.post_process_masks(lowercase , np.array(lowercase) , np.array(lowercase))
@require_vision
@require_tf
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = tempfile.mkdtemp()
a__: List[Any] = SamImageProcessor()
a__: Optional[int] = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> int:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Optional[int] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__: Dict = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__: Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[Any] = self.get_image_processor()
a__: str = SamProcessor(image_processor=lowercase)
a__: int = self.prepare_image_inputs()
a__: int = image_processor(lowercase , return_tensors='np')
a__: Dict = processor(images=lowercase , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_tf
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Tuple = self.get_image_processor()
a__: Any = SamProcessor(image_processor=lowercase)
a__: str = [tf.ones((1, 3, 5, 5))]
a__: List[Any] = [[17_64, 26_46]]
a__: List[Any] = [[6_83, 10_24]]
a__: List[Any] = processor.post_process_masks(lowercase , lowercase , lowercase , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Tuple = processor.post_process_masks(
lowercase , tf.convert_to_tensor(lowercase) , tf.convert_to_tensor(lowercase) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
# should also work with np
a__: Optional[Any] = [np.ones((1, 3, 5, 5))]
a__: int = processor.post_process_masks(
lowercase , np.array(lowercase) , np.array(lowercase) , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: List[str] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError):
a__: Any = processor.post_process_masks(
lowercase , np.array(lowercase) , np.array(lowercase) , return_tensors='tf')
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: str = tempfile.mkdtemp()
a__: int = SamImageProcessor()
a__: Union[str, Any] = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> Optional[int]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Any = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[int] = self.get_image_processor()
a__: int = SamProcessor(image_processor=lowercase)
a__: int = np.random.randint(0 , 2 , size=(1, 3, 5, 5)).astype(np.floataa)
a__: Dict = [tf.convert_to_tensor(lowercase)]
a__: Union[str, Any] = [torch.tensor(lowercase)]
a__: List[Any] = [[17_64, 26_46]]
a__: Optional[Any] = [[6_83, 10_24]]
a__: Tuple = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='tf')
a__: str = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='pt')
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Tuple = self.get_image_processor()
a__: Dict = SamProcessor(image_processor=lowercase)
a__: Any = self.prepare_image_inputs()
a__: List[Any] = image_processor(lowercase , return_tensors='pt')['pixel_values'].numpy()
a__: Tuple = processor(images=lowercase , return_tensors='pt')['pixel_values'].numpy()
a__: Any = image_processor(lowercase , return_tensors='tf')['pixel_values'].numpy()
a__: Any = processor(images=lowercase , return_tensors='tf')['pixel_values'].numpy()
self.assertTrue(np.allclose(lowercase , lowercase))
self.assertTrue(np.allclose(lowercase , lowercase))
self.assertTrue(np.allclose(lowercase , lowercase))
| 290 | 1 |
"""simple docstring"""
from __future__ import annotations
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[tuple[int, int]]:
a__ , a__: Union[str, Any] = position
a__: int = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
a__: Any = []
for position in positions:
a__ , a__: List[str] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_SCREAMING_SNAKE_CASE )
return permissible_positions
def __a ( _SCREAMING_SNAKE_CASE ) ->bool:
return not any(elem == 0 for row in board for elem in row )
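# Backtracking helper: from the current square, try every knight move returned by
# get_valid_pos, write the next move number into an empty target square, recurse, and
# reset the square to 0 when the branch fails.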
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->bool:
if is_complete(_SCREAMING_SNAKE_CASE ):
return True
for position in get_valid_pos(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ):
a__ , a__: Tuple = position
if board[y][x] == 0:
a__: Dict = curr + 1
if open_knight_tour_helper(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , curr + 1 ):
return True
a__: Tuple = 0
return False
def __a ( _SCREAMING_SNAKE_CASE ) ->list[list[int]]:
a__: Union[str, Any] = [[0 for i in range(_SCREAMING_SNAKE_CASE )] for j in range(_SCREAMING_SNAKE_CASE )]
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
a__: List[str] = 1
if open_knight_tour_helper(_SCREAMING_SNAKE_CASE , (i, j) , 1 ):
return board
a__: Optional[int] = 0
a__: Tuple = F'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | """simple docstring"""
from math import pow, sqrt
def __a ( *_SCREAMING_SNAKE_CASE ) ->bool:
a__: Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) > 0 and all(value > 0.0 for value in values )
return result
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError('Input Error: Molar mass values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
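# Worked example (approximate molar masses): for hydrogen (~2.016 g/mol) and oxygen
# (~32.00 g/mol), sqrt(32.00 / 2.016) is roughly 3.98, so by Graham's law hydrogen
# effuses about four times faster than oxygen at the same temperature and pressure.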
| 290 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class __snake_case :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , lowercase=10_00 , ) -> Any:
'''simple docstring'''
a__: List[str] = parent
a__: Any = batch_size
a__: int = seq_length
a__: Tuple = is_training
a__: Dict = use_input_mask
a__: Optional[int] = use_token_type_ids
a__: str = use_labels
a__: Optional[Any] = vocab_size
a__: int = hidden_size
a__: List[str] = num_hidden_layers
a__: Optional[Any] = num_attention_heads
a__: Any = intermediate_size
a__: str = hidden_act
a__: List[Any] = hidden_dropout_prob
a__: Any = attention_probs_dropout_prob
a__: List[str] = max_position_embeddings
a__: int = type_vocab_size
a__: int = type_sequence_label_size
a__: Any = initializer_range
a__: int = num_labels
a__: Optional[Any] = num_choices
a__: Optional[Any] = scope
a__: Any = range_bbox
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
# convert bbox to numpy since TF does not support item assignment
a__: Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
a__: int = bbox[i, j, 3]
a__: str = bbox[i, j, 1]
a__: Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a__: List[Any] = bbox[i, j, 2]
a__: Any = bbox[i, j, 0]
a__: Dict = t
a__: List[str] = tf.convert_to_tensor(lowercase)
a__: Optional[int] = None
if self.use_input_mask:
a__: Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
a__: int = None
if self.use_token_type_ids:
a__: int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a__: Any = None
a__: List[Any] = None
a__: Optional[int] = None
if self.use_labels:
a__: int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__: str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a__: Tuple = ids_tensor([self.batch_size] , self.num_choices)
a__: int = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Any = TFLayoutLMModel(config=lowercase)
a__: str = model(lowercase , lowercase , attention_mask=lowercase , token_type_ids=lowercase)
a__: List[str] = model(lowercase , lowercase , token_type_ids=lowercase)
a__: Tuple = model(lowercase , lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = TFLayoutLMForMaskedLM(config=lowercase)
a__: Any = model(lowercase , lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__: Dict = self.num_labels
a__: Optional[int] = TFLayoutLMForSequenceClassification(config=lowercase)
a__: Any = model(lowercase , lowercase , attention_mask=lowercase , token_type_ids=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Dict = self.num_labels
a__: Optional[int] = TFLayoutLMForTokenClassification(config=lowercase)
a__: int = model(lowercase , lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Tuple:
'''simple docstring'''
a__: Any = TFLayoutLMForQuestionAnswering(config=lowercase)
a__: Tuple = model(lowercase , lowercase , attention_mask=lowercase , token_type_ids=lowercase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Tuple = self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__: List[Any] = config_and_inputs
a__: Tuple = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a__ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
a__ = (
{
"""feature-extraction""": TFLayoutLMModel,
"""fill-mask""": TFLayoutLMForMaskedLM,
"""text-classification""": TFLayoutLMForSequenceClassification,
"""token-classification""": TFLayoutLMForTokenClassification,
"""zero-shot""": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ = False
a__ = True
a__ = 10
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Union[str, Any] = TFLayoutLMModelTester(self)
a__: List[Any] = ConfigTester(self , config_class=lowercase , hidden_size=37)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase)
@slow
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Any = TFLayoutLMModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
@unittest.skip('Onnx compliancy broke with TF 2.10')
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
pass
def __a ( ) ->Optional[Any]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
a__: List[str] = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
a__: Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
a__: Tuple = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
a__: Optional[int] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
a__: Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
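# The integration tests below run this fixed two-sequence batch through pretrained
# 'microsoft/layoutlm-base-uncased' checkpoints for the base model and the sequence
# classification, token classification and question-answering heads, and compare a
# few output slices and shapes against hard-coded reference values.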
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: str = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased')
a__ , a__ , a__ , a__ , a__: Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
a__: Union[str, Any] = model(input_ids=lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase)
# test the sequence output on [0, :3, :3]
a__: Optional[int] = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1e-3))
# test the pooled output on [1, :3]
a__: Any = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , lowercase , atol=1e-3))
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Any = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2)
a__ , a__ , a__ , a__ , a__: Dict = prepare_layoutlm_batch_inputs()
# forward pass
a__: List[Any] = model(
input_ids=lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=tf.convert_to_tensor([1, 1]) , )
# test whether we get a loss as a scalar
a__: Tuple = outputs.loss
a__: Union[str, Any] = (2,)
self.assertEqual(loss.shape , lowercase)
# test the shape of the logits
a__: int = outputs.logits
a__: Dict = (2, 2)
self.assertEqual(logits.shape , lowercase)
@slow
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: List[Any] = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13)
a__ , a__ , a__ , a__ , a__: List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
a__: int = model(
input_ids=lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase)
# test the shape of the logits
a__: str = outputs.logits
a__: Any = tf.convert_to_tensor((2, 25, 13))
self.assertEqual(logits.shape , lowercase)
@slow
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased')
a__ , a__ , a__ , a__ , a__: Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
a__: Optional[int] = model(input_ids=lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase)
# test the shape of the logits
a__: Dict = tf.convert_to_tensor((2, 25))
self.assertEqual(outputs.start_logits.shape , lowercase)
self.assertEqual(outputs.end_logits.shape , lowercase)
| 290 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class __snake_case ( __lowerCAmelCase ):
a__ = """roberta-prelayernorm"""
def __init__( self , lowercase=5_02_65 , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=None , **lowercase , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
a__: Union[str, Any] = vocab_size
a__: str = hidden_size
a__: Tuple = num_hidden_layers
a__: List[str] = num_attention_heads
a__: Dict = hidden_act
a__: int = intermediate_size
a__: Tuple = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: Tuple = max_position_embeddings
a__: Tuple = type_vocab_size
a__: Optional[Any] = initializer_range
a__: Tuple = layer_norm_eps
a__: Optional[int] = position_embedding_type
a__: Any = use_cache
a__: Dict = classifier_dropout
class __snake_case ( __lowerCAmelCase ):
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
a__: str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__: Union[str, Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
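# The ONNX export configuration above declares dynamic axes for 'input_ids' and
# 'attention_mask': axis 0 is the batch dimension and axis 1 the sequence (or the
# choice dimension for multiple-choice tasks, with the sequence moving to axis 2).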
| 290 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class __snake_case ( __lowerCAmelCase ):
a__ = """open-llama"""
def __init__( self , lowercase=10_00_00 , lowercase=40_96 , lowercase=1_10_08 , lowercase=32 , lowercase=32 , lowercase="silu" , lowercase=20_48 , lowercase=0.02 , lowercase=1e-6 , lowercase=True , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=True , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=True , lowercase=None , **lowercase , ) -> List[str]:
'''simple docstring'''
a__: List[Any] = vocab_size
a__: Any = max_position_embeddings
a__: int = hidden_size
a__: str = intermediate_size
a__: Union[str, Any] = num_hidden_layers
a__: List[Any] = num_attention_heads
a__: Dict = hidden_act
a__: List[Any] = initializer_range
a__: Optional[int] = rms_norm_eps
a__: Optional[Any] = use_cache
a__: Any = kwargs.pop(
'use_memorry_efficient_attention' , lowercase)
a__: List[Any] = hidden_dropout_prob
a__: str = attention_dropout_prob
a__: int = use_stable_embedding
a__: Union[str, Any] = shared_input_output_embedding
a__: int = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase , )
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase) or len(self.rope_scaling) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f'got {self.rope_scaling}')
a__: Optional[int] = self.rope_scaling.get('type' , lowercase)
a__: Optional[int] = self.rope_scaling.get('factor' , lowercase)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
if rope_scaling_factor is None or not isinstance(lowercase , lowercase) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}')
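# Example of a value that passes the validation above (illustrative only):
# rope_scaling={"type": "linear", "factor": 2.0}; the type must be "linear" or
# "dynamic" and the factor a float strictly greater than 1.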
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __snake_case ( __lowerCAmelCase ):
a__ = """audio-spectrogram-transformer"""
def __init__( self , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-12 , lowercase=16 , lowercase=True , lowercase=10 , lowercase=10 , lowercase=10_24 , lowercase=1_28 , **lowercase , ) -> str:
'''simple docstring'''
super().__init__(**lowercase)
a__: Any = hidden_size
a__: int = num_hidden_layers
a__: Union[str, Any] = num_attention_heads
a__: Any = intermediate_size
a__: Union[str, Any] = hidden_act
a__: int = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: str = initializer_range
a__: Tuple = layer_norm_eps
a__: Any = patch_size
a__: int = qkv_bias
a__: Optional[Any] = frequency_stride
a__: int = time_stride
a__: List[str] = max_length
a__: Tuple = num_mel_bins
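# Rough patch-count estimate for the defaults above (not taken from the source): with
# 16x16 patches, a stride of 10 along both axes, 128 mel bins and a max length of
# 1024 frames, the spectrogram yields about
# ((128 - 16) // 10 + 1) * ((1024 - 16) // 10 + 1) = 12 * 101 = 1212 patches.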
| 290 | 1 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
lowercase__ = '3'
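# The assignment target above has been lost in this rendering; in the upstream
# environment-dump script this line most likely sets os.environ['TF_CPP_MIN_LOG_LEVEL']
# to '3' to silence TensorFlow's C++ logging before the imports below.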
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 290 | """simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir('fixtures/test_sentencepiece.model')
lowercase__ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
lowercase__ = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = CamembertTokenizer
a__ = CamembertTokenizerFast
a__ = True
a__ = True
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a__: Tuple = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = '<pad>'
a__: List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: str = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>NOTUSED')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , '<mask>')
self.assertEqual(len(lowercase) , 10_04)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_05)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Optional[Any] = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
a__: List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
a__: Dict = 'I was born in 92000, and this is falsé.'
a__: Optional[int] = tokenizer.encode(lowercase)
a__: Any = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Optional[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
# <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
a__: Tuple = tokenizer.convert_ids_to_tokens(lowercase)
a__: Tuple = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__: Dict = self.get_tokenizer()
a__: str = self.get_rust_tokenizer()
a__: int = 'I was born in 92000, and this is falsé.'
a__: Optional[Any] = tokenizer.tokenize(lowercase)
a__: List[Any] = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: str = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Tuple = self.get_rust_tokenizer()
a__: Union[str, Any] = tokenizer.encode(lowercase)
a__: List[Any] = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Union[str, Any] = {'input_ids': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
a__: int = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=lowercase , )
| 290 | 1 |
"""simple docstring"""
from __future__ import annotations
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) ->tuple[int, float, str]:
a__: List[Any] = cipher_alphabet or [chr(_SCREAMING_SNAKE_CASE ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
a__: List[Any] = {
'a': 0.08_497,
'b': 0.01_492,
'c': 0.02_202,
'd': 0.04_253,
'e': 0.11_162,
'f': 0.02_228,
'g': 0.02_015,
'h': 0.06_094,
'i': 0.07_546,
'j': 0.00_153,
'k': 0.01_292,
'l': 0.04_025,
'm': 0.02_406,
'n': 0.06_749,
'o': 0.07_507,
'p': 0.01_929,
'q': 0.00_095,
'r': 0.07_587,
's': 0.06_327,
't': 0.09_356,
'u': 0.02_758,
'v': 0.00_978,
'w': 0.02_560,
'x': 0.00_150,
'y': 0.01_994,
'z': 0.00_077,
}
else:
# Custom frequencies dictionary
a__: Dict = frequencies_dict
if not case_sensitive:
a__: Any = ciphertext.lower()
# Chi squared statistic values
a__: dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
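# Each candidate shift is used to decrypt the message and the decryption is scored
# with a chi-squared statistic: for every letter, expected = frequencies[letter] *
# occurrences and the contribution is (occurrences - expected) ** 2 / expected; the
# shift with the smallest total score is taken to be the most likely key.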
for shift in range(len(_SCREAMING_SNAKE_CASE ) ):
a__: Optional[Any] = ''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
a__: Optional[Any] = (alphabet_letters.index(letter.lower() ) - shift) % len(
_SCREAMING_SNAKE_CASE )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
a__: Union[str, Any] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
a__: int = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
a__: Optional[Any] = decrypted_with_shift.lower().count(_SCREAMING_SNAKE_CASE )
                    # Get the expected amount of times the letter should appear based
# on letter frequencies
a__: Any = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
a__: Optional[Any] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
a__: Union[str, Any] = decrypted_with_shift.count(_SCREAMING_SNAKE_CASE )
                    # Get the expected amount of times the letter should appear based
# on letter frequencies
a__: Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
a__: Optional[int] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
a__: int = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(_SCREAMING_SNAKE_CASE ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
a__: int = min(
_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE , )
# Get all the data from the most likely cipher (key, decoded message)
    a__ , a__: str = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
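# Hedged usage sketch (editor's addition, not part of the original row): the helper
# above scores every candidate shift with a chi-squared test against English letter
# frequencies and returns (most_likely_shift, chi_squared_value, decoded_text).
# Using the obfuscated name this row gives the function, a call would look like:
#
#     shift, chi_squared, plaintext = __a('uryyb jbeyq')  # hypothetical ROT-13 input
#
# Very short ciphertexts may not recover the true shift, since the statistic needs
# a reasonable number of letters to be meaningful.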
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE = 1000000 ) ->int:
a__: int = limit + 1
a__: Optional[int] = [0] * limit
for first_term in range(1 , _SCREAMING_SNAKE_CASE ):
for n in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: List[Any] = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
a__: Any = sum(1 for x in frequency[1:limit] if x == 10 )
return count
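# Editor's note (worked identity, hedged): with the arithmetic progression
# x = a + d, y = a, z = a - d (a = first_term, d = common difference), we get
#     x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = 4*a*d - a**2 = a*(4*d - a) = n.
# So n must be a multiple of a, and first_term + n / first_term above equals 4*d,
# which is why it has to be divisible by 4. Positivity of z and of 4*d - a gives
# a > d and a < 4*d, the two bounds checked before incrementing frequency[n].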
if __name__ == "__main__":
print(f"{solution() = }")
| 290 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowercase__ = logging.get_logger('transformers.models.encodec')
lowercase__ = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
lowercase__ = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
lowercase__ = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
lowercase__ = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
lowercase__ = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
lowercase__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowercase__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowercase__ = []
lowercase__ = []
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
for attribute in key.split('.' ):
a__: str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
a__: List[str] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
a__: Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
a__: str = value
elif weight_type == "weight_g":
a__: int = value
elif weight_type == "weight_v":
a__: Tuple = value
elif weight_type == "bias":
a__: Dict = value
elif weight_type == "running_mean":
a__: Any = value
elif weight_type == "running_var":
a__: Tuple = value
elif weight_type == "num_batches_tracked":
a__: List[str] = value
elif weight_type == "weight_ih_l0":
a__: List[Any] = value
elif weight_type == "weight_hh_l0":
a__: List[Any] = value
elif weight_type == "bias_ih_l0":
a__: List[Any] = value
elif weight_type == "bias_hh_l0":
a__: List[Any] = value
elif weight_type == "weight_ih_l1":
a__: int = value
elif weight_type == "weight_hh_l1":
a__: str = value
elif weight_type == "bias_ih_l1":
a__: Union[str, Any] = value
elif weight_type == "bias_hh_l1":
a__: Any = value
else:
a__: Union[str, Any] = value
logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
a__ , a__: Optional[Any] = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
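# Editor's illustration of the wildcard matching above (hypothetical keys, not
# taken from the original script):
#     should_ignore('encoder.model.0.conv.bias', ['encoder.model.*'])                -> True  (prefix pattern)
#     should_ignore('quantizer.vq.layers.0._codebook.embed', ['quantizer.*.embed'])  -> True  (prefix + suffix)
#     should_ignore('decoder.layers.3.conv.weight', ['lstm'])                        -> False (no match)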
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
a__: List[Any] = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
a__: Optional[int] = MAPPING_24K
elif model_name == "encodec_48khz":
a__: List[Any] = MAPPING_48K
else:
raise ValueError(F'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info(F'{name} was ignored' )
continue
a__: int = False
for key, mapped_key in MAPPING.items():
if "*" in key:
a__ , a__: str = key.split('.*.' )
if prefix in name and suffix in name:
a__: List[str] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
a__: List[str] = True
if "*" in mapped_key:
a__: List[str] = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
a__: str = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
a__: int = 'weight_g'
elif "weight_v" in name:
a__: Dict = 'weight_v'
elif "weight_ih_l0" in name:
a__: int = 'weight_ih_l0'
elif "weight_hh_l0" in name:
a__: Union[str, Any] = 'weight_hh_l0'
elif "bias_ih_l0" in name:
a__: Optional[Any] = 'bias_ih_l0'
elif "bias_hh_l0" in name:
a__: Optional[int] = 'bias_hh_l0'
elif "weight_ih_l1" in name:
a__: Dict = 'weight_ih_l1'
elif "weight_hh_l1" in name:
a__: Optional[Any] = 'weight_hh_l1'
elif "bias_ih_l1" in name:
a__: List[str] = 'bias_ih_l1'
elif "bias_hh_l1" in name:
a__: Optional[Any] = 'bias_hh_l1'
elif "bias" in name:
a__: List[str] = 'bias'
elif "weight" in name:
a__: Any = 'weight'
elif "running_mean" in name:
a__: Dict = 'running_mean'
elif "running_var" in name:
a__: Dict = 'running_var'
elif "num_batches_tracked" in name:
a__: Dict = 'num_batches_tracked'
else:
a__: List[str] = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F'Unused weights: {unused_weights}' )
@torch.no_grad()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) ->int:
if config_path is not None:
a__: Dict = EncodecConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
a__: Tuple = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
a__: Any = [8, 5, 4, 4]
a__: List[str] = [2.2]
a__: List[Any] = 64
a__: Dict = 32000
a__: Union[str, Any] = 2048
a__: Union[str, Any] = False
a__: Any = False
a__: Optional[Any] = False
elif model_name == "encodec_48khz":
a__: Optional[int] = [8, 5, 4, 2]
a__: Union[str, Any] = [3.0, 6.0, 12.0, 24.0]
a__: List[str] = 48000
a__: Tuple = 2
a__: Optional[Any] = False
a__: Optional[int] = 'time_group_norm'
a__: Union[str, Any] = True
a__: Dict = 1.0
a__: str = 0.01
else:
raise ValueError(F'Unknown model name: {model_name}' )
a__: Optional[int] = EncodecModel(_SCREAMING_SNAKE_CASE )
a__: List[str] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
a__: int = torch.load(_SCREAMING_SNAKE_CASE )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
a__: str = original_checkpoint['best_state']
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
lowercase__ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 290 | """simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowercase__ = TypeVar('T')
lowercase__ = Union[List[T], Tuple[T, ...]]
lowercase__ = Union[T, List[T], Dict[str, T]]
lowercase__ = Union[str, bytes, os.PathLike]
| 290 | 1 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowercase__ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
lowercase__ = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
lowercase__ = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowercase__ = 'facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
lowercase__ = 'allenai'
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
a__: Union[str, Any] = dict((re.sub(r'@@$' , '' , _SCREAMING_SNAKE_CASE ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , _SCREAMING_SNAKE_CASE ), v) for k, v in d.items() )
a__: List[str] = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
a__: Any = d[k] # restore
return da
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
# prep
assert os.path.exists(_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
a__: Optional[int] = basename(_SCREAMING_SNAKE_CASE )
a__: str = dirname(_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
a__: List[Any] = cls.hub_models()
a__: str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
a__: int = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'using checkpoint {checkpoint_file}' )
a__: List[str] = hub_utils.from_pretrained(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , archive_map=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
a__: int = vars(chkpt['args']['model'] )
a__: int = args['source_lang']
a__: Optional[Any] = args['target_lang']
a__: Optional[int] = dirname(_SCREAMING_SNAKE_CASE )
a__: Optional[int] = basename(_SCREAMING_SNAKE_CASE )
# dicts
a__: Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , F'dict.{src_lang}.txt' )
a__: Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , F'dict.{tgt_lang}.txt' )
a__: int = Dictionary.load(_SCREAMING_SNAKE_CASE )
a__: Tuple = rewrite_dict_keys(src_dict.indices )
a__: Optional[int] = len(_SCREAMING_SNAKE_CASE )
a__: Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'vocab-src.json' )
print(F'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
a__: str = True
for k in src_vocab.keys():
if not k.islower():
a__: int = False
break
a__: List[str] = Dictionary.load(_SCREAMING_SNAKE_CASE )
a__: int = rewrite_dict_keys(tgt_dict.indices )
a__: int = len(_SCREAMING_SNAKE_CASE )
a__: Tuple = os.path.join(_SCREAMING_SNAKE_CASE , 'vocab-tgt.json' )
print(F'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# merges_file (bpecodes)
a__: Dict = os.path.join(_SCREAMING_SNAKE_CASE , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
a__: Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.exists(_SCREAMING_SNAKE_CASE ):
break
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as fin:
a__: Dict = fin.read()
a__: List[Any] = re.sub(r' \d+$' , '' , _SCREAMING_SNAKE_CASE , 0 , re.M ) # remove frequency number
print(F'Generating {merges_file}' )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as fout:
fout.write(_SCREAMING_SNAKE_CASE )
# model config
a__: List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}'
    assert args["tokenizer"] == "moses", F'need to extend tokenizer to support tokenizer={args["tokenizer"]}'
a__: Any = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
a__: Optional[Any] = 5
a__: Union[str, Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
a__: str = best_score_hparams[model_dir]['length_penalty']
else:
a__: Tuple = 1.0
print(F'Generating {fsmt_model_config_file}' )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# tokenizer config
a__: List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: List[Any] = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}' )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# model
a__: Union[str, Any] = chkpt['models'][0]
a__: str = model.state_dict()
# rename keys to start with 'model.'
a__: Optional[int] = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
a__: Union[str, Any] = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Dict = FSMTConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
a__: int = FSMTForConditionalGeneration(_SCREAMING_SNAKE_CASE )
# check that it loads ok
model_new.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
# save
a__: Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(F'cd {data_root}' )
print(F'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase__ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
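# Hedged CLI sketch (editor's addition; the script filename is hypothetical, the
# flags come from the argparse definition above):
#
#     python convert_fsmt_checkpoint.py \
#         --fsmt_checkpoint_path /path/to/wmt19.ru-en/model.pt \
#         --pytorch_dump_folder_path /path/to/output/wmt19-ru-en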
| 290 | """simple docstring"""
from math import pi, sqrt, tan
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
a__: List[Any] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_SCREAMING_SNAKE_CASE , 2 ) * torus_radius * tube_radius
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
a__: int = (sidea + sidea + sidea) / 2
a__: Tuple = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
| 290 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowercase__ = logging.get_logger(__name__)
lowercase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
lowercase__ = {
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
lowercase__ = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class __snake_case ( __lowerCAmelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = PRETRAINED_INIT_CONFIGURATION
a__ = RoFormerTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Any:
'''simple docstring'''
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
a__: str = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
pre_tok_state.get('lowercase' , lowercase) != do_lower_case
or pre_tok_state.get('strip_accents' , lowercase) != strip_accents
):
a__: List[str] = getattr(lowercase , pre_tok_state.pop('type'))
a__: Union[str, Any] = do_lower_case
a__: Any = strip_accents
a__: str = pre_tok_class(**lowercase)
a__: Optional[int] = do_lower_case
def __getstate__( self) -> Tuple:
'''simple docstring'''
a__: Optional[Any] = self.__dict__.copy()
a__: Dict = BertPreTokenizer()
return state
def __setstate__( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: Tuple = d
a__: List[str] = self.__dict__['_tokenizer'].get_vocab()
a__: Union[str, Any] = PreTokenizer.custom(JiebaPreTokenizer(lowercase))
def lowerCamelCase_ ( self , lowercase , lowercase=None) -> int:
'''simple docstring'''
a__: List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__: Dict = [self.sep_token_id]
a__: Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
a__: int = self._tokenizer.model.save(lowercase , name=lowercase)
return tuple(lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase=None , lowercase=None , lowercase=False , **lowercase , ) -> int:
'''simple docstring'''
a__: int = BertPreTokenizer()
return super().save_pretrained(lowercase , lowercase , lowercase , lowercase , **lowercase)
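# Editor's note (hedged illustration): the two special-token helpers above follow
# the usual BERT-style layout for single and paired inputs:
#     single input:  [CLS] A [SEP]          -> token_type_ids are all 0
#     paired input:  [CLS] A [SEP] B [SEP]  -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"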
| 290 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase__ = random.Random()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
if rng is None:
a__: Any = global_rng
a__: int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __snake_case ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=4_00 , lowercase=20_00 , lowercase=1 , lowercase=0.0 , lowercase=1_60_00 , lowercase=True , lowercase=True , ) -> Union[str, Any]:
'''simple docstring'''
a__: Tuple = parent
a__: Optional[int] = batch_size
a__: Optional[Any] = min_seq_length
a__: Optional[int] = max_seq_length
a__: Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__: Dict = feature_size
a__: Any = padding_value
a__: Optional[Any] = sampling_rate
a__: Optional[Any] = return_attention_mask
a__: str = do_normalize
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase_ ( self , lowercase=False , lowercase=False) -> Tuple:
'''simple docstring'''
def _flatten(lowercase):
return list(itertools.chain(*lowercase))
if equal_length:
a__: Dict = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
a__: List[Any] = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
a__: str = [np.asarray(lowercase) for x in speech_inputs]
return speech_inputs
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = WavaVecaFeatureExtractor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[int] = WavaVecaFeatureExtractionTester(self)
def lowerCamelCase_ ( self , lowercase) -> List[Any]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowercase , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0) - 1) < 1e-3))
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
a__: Optional[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: List[str] = [np.asarray(lowercase) for speech_input in speech_inputs]
# Test not batched input
a__: Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='np').input_values
a__: Dict = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test batched
a__: Dict = feat_extract(lowercase , return_tensors='np').input_values
a__: int = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test 2-D numpy arrays are batched.
a__: int = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
a__: Union[str, Any] = np.asarray(lowercase)
a__: int = feat_extract(lowercase , return_tensors='np').input_values
a__: Any = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: List[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Optional[int] = ['longest', 'max_length', 'do_not_pad']
a__: List[Any] = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: Dict = feat_extract(lowercase , padding=lowercase , max_length=lowercase , return_tensors='np')
a__: Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self.assertTrue(input_values[0][8_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self.assertTrue(input_values[0][10_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Optional[int] = range(8_00 , 14_00 , 2_00)
a__: List[str] = [floats_list((1, x))[0] for x in lengths]
a__: Tuple = ['longest', 'max_length', 'do_not_pad']
a__: Dict = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: int = feat_extract(lowercase , max_length=lowercase , padding=lowercase)
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Any = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Dict = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='max_length' , return_tensors='np')
a__: int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: int = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: str = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='longest' , return_tensors='np')
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00))
a__: Dict = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Tuple = feat_extract(
lowercase , truncation=lowercase , max_length=20_00 , padding='longest' , return_tensors='np')
a__: str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00))
@require_torch
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
import torch
a__: Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Tuple = np.random.rand(1_00).astype(np.floataa)
a__: Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__: Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
a__: Optional[Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
@slow
@require_torch
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
a__: str = WavaVecaConfig.from_pretrained(lowercase)
a__: str = WavaVecaFeatureExtractor.from_pretrained(lowercase)
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer')
| 290 | 1 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = CodeGenTokenizer
a__ = CodeGenTokenizerFast
a__ = True
a__ = {"""add_prefix_space""": True}
a__ = False
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a__: int = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
a__: List[str] = dict(zip(lowercase , range(len(lowercase))))
a__: Optional[int] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
a__: Tuple = {'unk_token': '<unk>'}
a__: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(lowercase) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(lowercase))
def lowerCamelCase_ ( self , **lowercase) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **lowercase)
def lowerCamelCase_ ( self , **lowercase) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **lowercase)
def lowerCamelCase_ ( self , lowercase) -> Optional[int]:
'''simple docstring'''
a__: List[str] = 'lower newer'
a__: str = 'lower newer'
return input_text, output_text
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a__: Union[str, Any] = 'lower newer'
a__: Any = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
a__: Tuple = tokenizer.tokenize(lowercase , add_prefix_space=lowercase)
self.assertListEqual(lowercase , lowercase)
a__: int = tokens + [tokenizer.unk_token]
a__: Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__: Union[str, Any] = self.get_tokenizer()
a__: int = self.get_rust_tokenizer(add_prefix_space=lowercase)
a__: Any = 'lower newer'
# Testing tokenization
a__: Any = tokenizer.tokenize(lowercase , add_prefix_space=lowercase)
a__: Tuple = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
# Testing conversion to ids without special tokens
a__: Optional[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase)
a__: Dict = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
# Testing conversion to ids with special tokens
a__: Dict = self.get_rust_tokenizer(add_prefix_space=lowercase)
a__: Tuple = tokenizer.encode(lowercase , add_prefix_space=lowercase)
a__: Tuple = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
# Testing the unknown token
a__: Any = tokens + [rust_tokenizer.unk_token]
a__: Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase) , lowercase)
def lowerCamelCase_ ( self , *lowercase , **lowercase) -> List[Any]:
'''simple docstring'''
pass
def lowerCamelCase_ ( self , lowercase=15) -> int:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
a__: Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase)
# Simple input
a__: Any = 'This is a simple input'
a__: str = ['This is a simple input 1', 'This is a simple input 2']
a__: Any = ('This is a simple input', 'This is a pair')
a__: int = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding='max_length')
# Simple input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding='max_length')
# Simple input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , )
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding='max_length')
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding='max_length')
# Pair input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , )
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: str = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>')
# Simple input
a__: str = 'This is a simple input'
a__: List[Any] = ['This is a simple input looooooooong', 'This is a simple input']
a__: int = ('This is a simple input', 'This is a pair')
a__: str = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
a__: List[Any] = tokenizer.pad_token_id
a__: Optional[Any] = tokenizer(lowercase , padding='max_length' , max_length=30 , return_tensors='np')
a__: Union[str, Any] = tokenizer(lowercase , padding=lowercase , truncate=lowercase , return_tensors='np')
a__: Union[str, Any] = tokenizer(*lowercase , padding='max_length' , max_length=60 , return_tensors='np')
a__: Optional[Any] = tokenizer(lowercase , padding=lowercase , truncate=lowercase , return_tensors='np')
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30)
self.assertTrue(pad_token_id in out_s['input_ids'])
self.assertTrue(0 in out_s['attention_mask'])
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0])
self.assertFalse(0 in out_sa['attention_mask'][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1])
self.assertTrue(0 in out_sa['attention_mask'][1])
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60)
self.assertTrue(pad_token_id in out_p['input_ids'])
self.assertTrue(0 in out_p['attention_mask'])
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0])
self.assertFalse(0 in out_pa['attention_mask'][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1])
self.assertTrue(0 in out_pa['attention_mask'][1])
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Any = '$$$'
a__: Union[str, Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase , add_bos_token=lowercase)
a__: List[str] = 'This is a simple input'
a__: List[Any] = ['This is a simple input 1', 'This is a simple input 2']
a__: Tuple = tokenizer.bos_token_id
a__: Optional[Any] = tokenizer(lowercase)
a__: Dict = tokenizer(lowercase)
self.assertEqual(out_s.input_ids[0] , lowercase)
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
a__: str = tokenizer.decode(out_s.input_ids)
a__: List[str] = tokenizer.batch_decode(out_sa.input_ids)
self.assertEqual(decode_s.split()[0] , lowercase)
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
@slow
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[int] = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
a__: int = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
a__: Tuple = '\nif len_a > len_b: result = a\nelse: result = b'
a__: Dict = tokenizer.encode(lowercase)
a__: Union[str, Any] = ['^#', re.escape('<|endoftext|>'), '^\'\'\'', '^"""', '\n\n\n']
a__: List[str] = tokenizer.decode(lowercase , truncate_before_pattern=lowercase)
self.assertEqual(lowercase , lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
pass
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __snake_case ( __lowerCAmelCase ):
a__ = """decision_transformer"""
a__ = ["""past_key_values"""]
a__ = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase=17 , lowercase=4 , lowercase=1_28 , lowercase=40_96 , lowercase=True , lowercase=1 , lowercase=10_24 , lowercase=3 , lowercase=1 , lowercase=None , lowercase="relu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=5_02_56 , lowercase=5_02_56 , lowercase=False , lowercase=False , **lowercase , ) -> Tuple:
'''simple docstring'''
a__: List[str] = state_dim
a__: int = act_dim
a__: List[Any] = hidden_size
a__: List[str] = max_ep_len
a__: List[Any] = action_tanh
a__: Optional[Any] = vocab_size
a__: Tuple = n_positions
a__: Dict = n_layer
a__: Optional[int] = n_head
a__: Optional[int] = n_inner
a__: Any = activation_function
a__: Union[str, Any] = resid_pdrop
a__: Any = embd_pdrop
a__: Any = attn_pdrop
a__: List[Any] = layer_norm_epsilon
a__: Optional[Any] = initializer_range
a__: Any = scale_attn_weights
a__: Dict = use_cache
a__: Optional[int] = scale_attn_by_inverse_layer_idx
a__: List[str] = reorder_and_upcast_attn
a__: Any = bos_token_id
a__: int = eos_token_id
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
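# Hedged usage sketch (editor's addition; DecisionTransformerConfig is presumably the
# original, unobfuscated class name): instantiate with defaults and read a remapped
# attribute; the attribute_map above routes num_hidden_layers -> n_layer.
#
#     cfg = DecisionTransformerConfig(state_dim=17, act_dim=4)
#     cfg.num_hidden_layers  # -> 3 (the n_layer default)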
| 290 | 1 |
"""simple docstring"""
lowercase__ = [0, 2, 4, 6, 8]
lowercase__ = [1, 3, 5, 7, 9]
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
a__: Union[str, Any] = 0
for digit in range(10 ):
a__: Tuple = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return result
a__: Dict = 0
for digita in range(10 ):
a__: List[str] = digita
if (remainder + digita) % 2 == 0:
a__: Any = ODD_DIGITS
else:
a__: Optional[int] = EVEN_DIGITS
for digita in other_parity_digits:
a__: Dict = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
return result
def __a ( _SCREAMING_SNAKE_CASE = 9 ) ->int:
a__: Dict = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(_SCREAMING_SNAKE_CASE , 0 , [0] * length , _SCREAMING_SNAKE_CASE )
return result
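# Editor's sanity note (hedged): a number n is "reversible" when n + reverse(n)
# contains only odd digits and neither n nor reverse(n) has a leading zero; the
# problem statement gives 120 reversible numbers below one thousand, so the
# recursion above should return 120 when called with max_power = 3 (using the
# obfuscated name in this row, __a(3) == 120).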
if __name__ == "__main__":
print(f"{solution() = }")
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
while a != 0:
a__ , a__: List[str] = b % a, a
return b
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
if gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) != 1:
a__: Dict = F'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: Union[str, Any] = 1, 0, a
a__ , a__ , a__: Any = 0, 1, m
while va != 0:
a__: int = ua // va
a__ , a__ , a__ , a__ , a__ , a__: Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
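# Hedged usage sketch (editor's addition): the second helper is a modular inverse
# via the extended Euclidean algorithm; for example the inverse of 3 modulo 11 is 4,
# since (3 * 4) % 11 == 1, and a ValueError is raised when gcd(a, m) != 1.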
| 290 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __snake_case ( __lowerCAmelCase ):
a__ = """deta"""
a__ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , lowercase=None , lowercase=9_00 , lowercase=20_48 , lowercase=6 , lowercase=20_48 , lowercase=8 , lowercase=6 , lowercase=10_24 , lowercase=8 , lowercase=0.0 , lowercase=True , lowercase="relu" , lowercase=2_56 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1.0 , lowercase=True , lowercase=False , lowercase="sine" , lowercase=5 , lowercase=4 , lowercase=4 , lowercase=True , lowercase=3_00 , lowercase=True , lowercase=True , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=1 , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=0.1 , lowercase=0.25 , **lowercase , ) -> Dict:
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
a__: Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
else:
if isinstance(lowercase , lowercase):
a__: Optional[int] = backbone_config.pop('model_type')
a__: Optional[int] = CONFIG_MAPPING[backbone_model_type]
a__: List[Any] = config_class.from_dict(lowercase)
a__: Any = backbone_config
a__: Union[str, Any] = num_queries
a__: int = max_position_embeddings
a__: Tuple = d_model
a__: List[str] = encoder_ffn_dim
a__: int = encoder_layers
a__: Any = encoder_attention_heads
a__: Any = decoder_ffn_dim
a__: Any = decoder_layers
a__: int = decoder_attention_heads
a__: Any = dropout
a__: Optional[Any] = attention_dropout
a__: int = activation_dropout
a__: Tuple = activation_function
a__: int = init_std
a__: Dict = init_xavier_std
a__: int = encoder_layerdrop
a__: Any = auxiliary_loss
a__: str = position_embedding_type
# deformable attributes
a__: Tuple = num_feature_levels
a__: Union[str, Any] = encoder_n_points
a__: Dict = decoder_n_points
a__: Tuple = two_stage
a__: List[str] = two_stage_num_proposals
a__: List[Any] = with_box_refine
a__: List[str] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
a__: int = class_cost
a__: Dict = bbox_cost
a__: Optional[int] = giou_cost
# Loss coefficients
a__: Union[str, Any] = mask_loss_coefficient
a__: Union[str, Any] = dice_loss_coefficient
a__: List[str] = bbox_loss_coefficient
a__: str = giou_loss_coefficient
a__: int = eos_coefficient
a__: Tuple = focal_alpha
super().__init__(is_encoder_decoder=lowercase , **lowercase)
@property
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return self.d_model
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: List[Any] = copy.deepcopy(self.__dict__)
a__: Tuple = self.backbone_config.to_dict()
a__: Optional[Any] = self.__class__.model_type
return output
| 290 | """simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase__ = logging.getLogger(__name__)
class __snake_case :
def __init__( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = False
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
if not self.initialized:
a__: Optional[int] = RagRetriever(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
a__: Optional[int] = True
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
self.retriever.index.init_index()
def lowerCamelCase_ ( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__ , a__: str = self.retriever._main_retrieve(lowercase , lowercase)
return doc_ids, retrieved_doc_embeds
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None) -> int:
'''simple docstring'''
if index is not None and index.is_initialized() and len(lowercase) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
super().__init__(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
a__: Any = retrieval_workers
if len(self.retrieval_workers) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase)
for worker in self.retrieval_workers
])
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
logger.info('initializing retrieval')
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def lowerCamelCase_ ( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
if len(self.retrieval_workers) > 0:
# Select a random retrieval actor.
a__: int = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
a__ , a__: List[Any] = ray.get(random_worker.retrieve.remote(lowercase , lowercase))
else:
a__ , a__: Dict = self._main_retrieve(lowercase , lowercase)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase)
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase=None , **lowercase) -> Tuple:
'''simple docstring'''
return super(lowercase , cls).get_tokenizers(lowercase , lowercase , **lowercase)
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase , lowercase=None , **lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[int] = kwargs.pop('config' , lowercase) or RagConfig.from_pretrained(lowercase , **lowercase)
a__: Union[str, Any] = RagTokenizer.from_pretrained(lowercase , config=lowercase)
a__: int = rag_tokenizer.question_encoder
a__: Any = rag_tokenizer.generator
if indexed_dataset is not None:
a__: List[Any] = 'custom'
a__: Optional[Any] = CustomHFIndex(config.retrieval_vector_size , lowercase)
else:
a__: Dict = cls._build_index(lowercase)
return cls(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
| 290 | 1 |
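# The DETA configuration above accepts its backbone either as a ready config
# object or as a plain dict whose "model_type" key selects the concrete config
# class. A minimal sketch of that dispatch pattern follows; the registry and
# class names (CONFIG_REGISTRY, ResNetLikeConfig) are illustrative assumptions,
# not the actual transformers CONFIG_MAPPING.
class ResNetLikeConfig:
    def __init__(self, depth=50, out_features=None):
        self.depth = depth
        self.out_features = out_features or []

    @classmethod
    def from_dict(cls, data):
        return cls(**data)


CONFIG_REGISTRY = {"resnet": ResNetLikeConfig}


def resolve_backbone_config(backbone_config):
    # None -> fall back to a default backbone; dict -> dispatch on "model_type".
    if backbone_config is None:
        return CONFIG_REGISTRY["resnet"](out_features=["stage2", "stage3", "stage4"])
    if isinstance(backbone_config, dict):
        data = dict(backbone_config)
        model_type = data.pop("model_type")
        return CONFIG_REGISTRY[model_type].from_dict(data)
    return backbone_config  # already a config instance


assert resolve_backbone_config({"model_type": "resnet", "depth": 101}).depth == 101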
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=4_00 , lowercase=True , lowercase=None , lowercase=True , lowercase=1 / 2_55 , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , ) -> Optional[int]:
'''simple docstring'''
a__: Union[str, Any] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
a__: Dict = parent
a__: str = batch_size
a__: str = num_channels
a__: int = min_resolution
a__: Union[str, Any] = max_resolution
a__: Optional[Any] = do_resize
a__: Union[str, Any] = size
a__: int = do_rescale
a__: List[Any] = rescale_factor
a__: Dict = do_normalize
a__: Tuple = image_mean
a__: List[Any] = image_std
a__: Union[str, Any] = do_pad
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def lowerCamelCase_ ( self , lowercase , lowercase=False) -> Optional[Any]:
'''simple docstring'''
if not batched:
a__: List[Any] = image_inputs[0]
if isinstance(lowercase , Image.Image):
a__ , a__: str = image.size
else:
a__ , a__: Any = image.shape[1], image.shape[2]
if w < h:
a__: Dict = int(self.size['shortest_edge'] * h / w)
a__: str = self.size['shortest_edge']
elif w > h:
a__: Union[str, Any] = self.size['shortest_edge']
a__: Dict = int(self.size['shortest_edge'] * w / h)
else:
a__: Tuple = self.size['shortest_edge']
a__: Optional[Any] = self.size['shortest_edge']
else:
a__: List[str] = []
for image in image_inputs:
a__ , a__: Optional[Any] = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
a__: List[str] = max(lowercase , key=lambda lowercase: item[0])[0]
a__: Tuple = max(lowercase , key=lambda lowercase: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = DetrImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: str = DetrImageProcessingTester(self)
@property
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: int = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase , 'image_mean'))
self.assertTrue(hasattr(lowercase , 'image_std'))
self.assertTrue(hasattr(lowercase , 'do_normalize'))
self.assertTrue(hasattr(lowercase , 'do_rescale'))
self.assertTrue(hasattr(lowercase , 'rescale_factor'))
self.assertTrue(hasattr(lowercase , 'do_resize'))
self.assertTrue(hasattr(lowercase , 'size'))
self.assertTrue(hasattr(lowercase , 'do_pad'))
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33})
self.assertEqual(image_processor.do_pad , lowercase)
a__: Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase)
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84})
self.assertEqual(image_processor.do_pad , lowercase)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: int = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a__: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase)
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image)
# Test not batched input
a__: str = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a__ , a__: str = self.image_processor_tester.get_expected_values(lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ , a__: Tuple = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase)
a__: Optional[Any] = image_processing(lowercase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[str] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a__: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase)
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray)
# Test not batched input
a__: int = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a__ , a__: str = self.image_processor_tester.get_expected_values(lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__: Dict = image_processing(lowercase , return_tensors='pt').pixel_values
a__ , a__: Any = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Tuple = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a__: List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase)
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor)
# Test not batched input
a__: Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a__ , a__: Optional[int] = self.image_processor_tester.get_expected_values(lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__: int = image_processing(lowercase , return_tensors='pt').pixel_values
a__ , a__: Tuple = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
a__: Dict = json.loads(f.read())
a__: Union[str, Any] = {'image_id': 3_97_69, 'annotations': target}
# encode them
a__: List[str] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
a__: Dict = image_processing(images=lowercase , annotations=lowercase , return_tensors='pt')
# verify pixel values
a__: str = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding['pixel_values'].shape , lowercase)
a__: List[str] = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1e-4))
# verify area
a__: str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase))
# verify boxes
a__: Optional[Any] = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase)
a__: Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1e-3))
# verify image_id
a__: Optional[Any] = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase))
# verify is_crowd
a__: int = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase))
# verify class_labels
a__: Dict = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase))
# verify orig_size
a__: Optional[Any] = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase))
# verify size
a__: Tuple = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase))
@slow
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
a__: List[str] = json.loads(f.read())
a__: Optional[Any] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
a__: Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
a__: Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
a__: Optional[int] = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors='pt')
# verify pixel values
a__: str = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding['pixel_values'].shape , lowercase)
a__: Any = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1e-4))
# verify area
a__: Any = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase))
# verify boxes
a__: Optional[Any] = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase)
a__: List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1e-3))
# verify image_id
a__: Union[str, Any] = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase))
# verify is_crowd
a__: Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase))
# verify class_labels
a__: List[Any] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase))
# verify masks
a__: int = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase)
# verify orig_size
a__: Optional[Any] = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase))
# verify size
a__: Any = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase))
| 290 | """simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
a__: int = None
if token is not None:
a__: Tuple = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Optional[Any] = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
a__: str = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: str = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
a__: int = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: Dict = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Dict:
a__: Dict = None
if token is not None:
a__: List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Dict = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: List[Any] = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
a__: Dict = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: Optional[int] = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: List[Any] = None
if token is not None:
a__: Optional[int] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = result.headers['Location']
a__: Optional[int] = requests.get(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
a__: int = os.path.join(_SCREAMING_SNAKE_CASE , F'{artifact_name}.zip' )
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fp:
fp.write(response.content )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
a__: List[Any] = []
a__: Optional[Any] = []
a__: List[Any] = None
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_SCREAMING_SNAKE_CASE ) as f:
for line in f:
a__: Optional[int] = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
a__: Union[str, Any] = line[: line.index(': ' )]
a__: Union[str, Any] = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
a__: Optional[int] = line[len('FAILED ' ) :]
failed_tests.append(_SCREAMING_SNAKE_CASE )
elif filename == "job_name.txt":
a__: Union[str, Any] = line
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
F'`errors` and `failed_tests` should have the same number of elements. Got {len(_SCREAMING_SNAKE_CASE )} for `errors` '
F'and {len(_SCREAMING_SNAKE_CASE )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
' problem.' )
a__: Tuple = None
if job_name and job_links:
a__: Dict = job_links.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# A list with elements of the form (line of error, error, failed test)
a__: int = [x + [y] + [job_link] for x, y in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return result
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->str:
a__: int = []
a__: Optional[int] = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for p in os.listdir(_SCREAMING_SNAKE_CASE ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_SCREAMING_SNAKE_CASE , job_links=_SCREAMING_SNAKE_CASE ) )
return errors
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Any:
a__: str = Counter()
counter.update([x[1] for x in logs] )
a__: int = counter.most_common()
a__: Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
a__: List[str] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
a__: Optional[Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: List[str] = test.split('::' )[0]
if test.startswith('tests/models/' ):
a__: Dict = test.split('/' )[2]
else:
a__: Any = None
return test
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[str]:
a__: int = [(x[0], x[1], get_model(x[2] )) for x in logs]
a__: List[Any] = [x for x in logs if x[2] is not None]
a__: Optional[Any] = {x[2] for x in logs}
a__: Dict = {}
for test in tests:
a__: Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
a__: Union[str, Any] = counter.most_common()
a__: List[str] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
a__: List[Any] = sum(error_counts.values() )
if n_errors > 0:
a__: Any = {'count': n_errors, 'errors': error_counts}
a__: Optional[int] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: Any = '| no. | error | status |'
a__: Any = '|-:|:-|:-|'
a__: str = [header, sep]
for error in reduced_by_error:
a__: int = reduced_by_error[error]['count']
a__: Tuple = F'| {count} | {error[:100]} | |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
a__: List[str] = '| model | no. of errors | major error | count |'
a__: str = '|-:|-:|-:|-:|'
a__: int = [header, sep]
for model in reduced_by_model:
a__: Tuple = reduced_by_model[model]['count']
a__ , a__: Dict = list(reduced_by_model[model]['errors'].items() )[0]
a__: Dict = F'| {model} | {count} | {error[:60]} | {_count} |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowercase__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowercase__ = get_job_links(args.workflow_run_id, token=args.token)
lowercase__ = {}
# To deal with the `workflow_call` event, where a job name is the combination of the job names in the caller and the callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowercase__ = k.find(' / ')
lowercase__ = k[index + len(' / ') :]
lowercase__ = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowercase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowercase__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowercase__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowercase__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowercase__ = reduce_by_error(errors)
lowercase__ = reduce_by_model(errors)
lowercase__ = make_github_table(reduced_by_error)
lowercase__ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 290 | 1 |
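# The CI-statistics script above reduces to two steps: count error strings with
# collections.Counter, then render the counts as a small markdown table. A
# self-contained sketch of that aggregation, assuming each log entry is an
# (error_line, error, failed_test) tuple as in the script, is shown below; the
# function names and sample data are illustrative.
from collections import Counter


def reduce_by_error(logs):
    counter = Counter(entry[1] for entry in logs)
    return {
        error: {
            "count": count,
            "failed_tests": [(e[2], e[0]) for e in logs if e[1] == error],
        }
        for error, count in counter.most_common()
    }


def make_markdown_table(reduced):
    lines = ["| no. | error |", "|-:|:-|"]
    for error, info in reduced.items():
        lines.append(f"| {info['count']} | {error[:100]} |")
    return "\n".join(lines)


sample_logs = [
    ("test_a.py:10", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_a"),
    ("test_b.py:20", "AssertionError", "tests/models/gpt2/test_modeling_gpt2.py::test_b"),
    ("test_c.py:30", "ImportError", "tests/models/t5/test_modeling_t5.py::test_c"),
]
print(make_markdown_table(reduce_by_error(sample_logs)))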
"""simple docstring"""
lowercase__ = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609_344,
"knot": 1.852,
}
lowercase__ = {
"km/h": 1.0,
"m/s": 0.277_777_778,
"mph": 0.621_371_192,
"knot": 0.539_956_803,
}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
a__: int = (
F'Incorrect \'unit_from\' or \'unit_to\' value: {unit_from!r}, {unit_to!r}\n'
F'Valid values are: {", ".join(_SCREAMING_SNAKE_CASE )}'
)
raise ValueError(_SCREAMING_SNAKE_CASE )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | """simple docstring"""
import math
def __a ( _SCREAMING_SNAKE_CASE ) ->bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __a ( _SCREAMING_SNAKE_CASE = 0.1 ) ->int:
a__: str = 3
a__: Optional[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_SCREAMING_SNAKE_CASE )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | 1 |
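# The unit-conversion table above works through km/h as a pivot: multiply by the
# source unit's factor into km/h, then by the target unit's factor out of it.
# Below is a small sketch of that idea, reusing the factor values listed above
# and the same three-decimal rounding; the name convert_speed is illustrative.
TO_KMH = {"km/h": 1.0, "m/s": 3.6, "mph": 1.609344, "knot": 1.852}
FROM_KMH = {"km/h": 1.0, "m/s": 0.277777778, "mph": 0.621371192, "knot": 0.539956803}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_from not in TO_KMH or unit_to not in FROM_KMH:
        raise ValueError(f"unknown unit: {unit_from!r} or {unit_to!r}")
    return round(speed * TO_KMH[unit_from] * FROM_KMH[unit_to], 3)


# 100 km/h is about 27.778 m/s.
assert convert_speed(100, "km/h", "m/s") == 27.778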
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = SpeechTaTokenizer
a__ = False
a__ = True
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a__: List[Any] = SpeechTaTokenizer(lowercase)
a__: Optional[int] = AddedToken('<mask>' , lstrip=lowercase , rstrip=lowercase)
a__: Any = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token})
tokenizer.add_tokens(['<ctc_blank>'])
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , lowercase) -> str:
'''simple docstring'''
a__: Optional[Any] = 'this is a test'
a__: int = 'this is a test'
return input_text, output_text
def lowerCamelCase_ ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5) -> List[str]:
'''simple docstring'''
a__ , a__: List[str] = self.get_input_output_texts(lowercase)
a__: List[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: Dict = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase)
return text, ids
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Dict = '<pad>'
a__: Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: List[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-4] , 'œ')
self.assertEqual(vocab_keys[-2] , '<mask>')
self.assertEqual(vocab_keys[-1] , '<ctc_blank>')
self.assertEqual(len(lowercase) , 81)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Optional[Any] = self.get_tokenizers(do_lower_case=lowercase)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
a__: List[str] = tokenizer.vocab_size
a__: Tuple = len(lowercase)
self.assertNotEqual(lowercase , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
a__: Union[str, Any] = ['aaaaa bbbbbb', 'cccccccccdddddddd']
a__: Optional[int] = tokenizer.add_tokens(lowercase)
a__: int = tokenizer.vocab_size
a__: int = len(lowercase)
self.assertNotEqual(lowercase , 0)
self.assertEqual(lowercase , lowercase)
self.assertEqual(lowercase , len(lowercase))
self.assertEqual(lowercase , all_size + len(lowercase))
a__: Tuple = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=lowercase)
self.assertGreaterEqual(len(lowercase) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
a__: Union[str, Any] = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
a__: Optional[Any] = tokenizer.add_special_tokens(lowercase)
a__: int = tokenizer.vocab_size
a__: Tuple = len(lowercase)
self.assertNotEqual(lowercase , 0)
self.assertEqual(lowercase , lowercase)
self.assertEqual(lowercase , len(lowercase))
self.assertEqual(lowercase , all_size_a + len(lowercase))
a__: List[str] = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=lowercase)
self.assertGreaterEqual(len(lowercase) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
pass
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: str = self.get_tokenizer()
a__: int = tokenizer.tokenize('This is a test')
# fmt: off
self.assertListEqual(lowercase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
a__: Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowercase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
a__: Dict = tokenizer.convert_tokens_to_ids(lowercase)
# fmt: off
self.assertListEqual(lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
# fmt: on
a__: Optional[int] = tokenizer.convert_ids_to_tokens(lowercase)
self.assertListEqual(
lowercase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
a__: Union[str, Any] = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=lowercase , )
| 290 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase__ = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 290 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 290 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = KandinskyInpaintPipeline
a__ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
a__ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
a__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a__ = False
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
return 1_00
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[int] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
torch.manual_seed(0)
a__: Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
a__: Optional[Any] = MultilingualCLIP(lowercase)
a__: int = text_encoder.eval()
return text_encoder
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = {
'in_channels': 9,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
a__: str = UNetaDConditionModel(**lowercase)
return model
@property
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = VQModel(**self.dummy_movq_kwargs)
return model
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Dict = self.dummy_text_encoder
a__: int = self.dummy_tokenizer
a__: str = self.dummy_unet
a__: Any = self.dummy_movq
a__: Tuple = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase , )
a__: Tuple = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase_ ( self , lowercase , lowercase=0) -> Any:
'''simple docstring'''
a__: List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase)).to(lowercase)
a__: int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowercase)
# create init_image
a__: Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase)).to(lowercase)
a__: int = image.cpu().permute(0 , 2 , 3 , 1)[0]
a__: Optional[int] = Image.fromarray(np.uinta(lowercase)).convert('RGB').resize((2_56, 2_56))
# create mask
a__: Tuple = np.ones((64, 64) , dtype=np.floataa)
a__: Optional[Any] = 0
if str(lowercase).startswith('mps'):
a__: str = torch.manual_seed(lowercase)
else:
a__: Dict = torch.Generator(device=lowercase).manual_seed(lowercase)
a__: Optional[int] = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = 'cpu'
a__: List[Any] = self.get_dummy_components()
a__: Optional[Any] = self.pipeline_class(**lowercase)
a__: str = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Optional[int] = pipe(**self.get_dummy_inputs(lowercase))
a__: List[str] = output.images
a__: int = pipe(
**self.get_dummy_inputs(lowercase) , return_dict=lowercase , )[0]
a__: Optional[Any] = image[0, -3:, -3:, -1]
a__: List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}')
assert image.shape == (1, 64, 64, 3)
a__: str = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy')
a__: int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
a__: Union[str, Any] = np.ones((7_68, 7_68) , dtype=np.floataa)
a__: int = 0
a__: Optional[int] = 'a hat'
a__: int = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa)
pipe_prior.to(lowercase)
a__: Any = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa)
a__: Optional[Any] = pipeline.to(lowercase)
pipeline.set_progress_bar_config(disable=lowercase)
a__: Dict = torch.Generator(device='cpu').manual_seed(0)
a__ , a__: Optional[Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
a__: List[str] = pipeline(
lowercase , image=lowercase , mask_image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
a__: str = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase , lowercase)
| 290 | 1 |
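# The inpainting test inputs above are built from a random CHW tensor turned
# into a PIL image plus a float mask whose zeroed region marks what to repaint.
# A minimal sketch of those two conversions follows; the shapes and the *255
# scaling are assumptions for illustration, not copied from the test verbatim.
import numpy as np
import torch
from PIL import Image

tensor = torch.rand(1, 3, 64, 64)                       # CHW batch with values in [0, 1)
array = tensor.cpu().permute(0, 2, 3, 1)[0].numpy()     # -> HWC, shape (64, 64, 3)
init_image = Image.fromarray(np.uint8(array * 255)).convert("RGB").resize((256, 256))

mask = np.ones((64, 64), dtype=np.float32)              # 1 = keep, 0 = repaint
mask[:32, :32] = 0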
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( __lowerCAmelCase ):
a__ = """new-model"""
if is_tf_available():
class __snake_case ( __lowerCAmelCase ):
a__ = NewModelConfig
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Dict = 'bert-base-cased'
a__: List[str] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: List[str] = TFAutoModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Union[str, Any] = 'bert-base-cased'
a__: Optional[Any] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: List[Any] = TFAutoModelForPreTraining.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: List[Any] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase)
a__ , a__: Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Any = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Optional[int] = TFAutoModelWithLMHead.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Optional[Any] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Tuple = TFAutoModelForMaskedLM.from_pretrained(lowercase)
a__ , a__: Dict = TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: str = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: int = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase)
a__ , a__: str = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a__: Any = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a__: Optional[Any] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Dict = TFAutoModelForQuestionAnswering.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
@require_tensorflow_probability
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
a__: Any = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase)
a__ , a__: Union[str, Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowercase , output_loading_info=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Any = TFAutoModelWithLMHead.from_pretrained(lowercase)
self.assertIsInstance(lowercase , lowercase)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=lowercase) , 1_44_10)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase)
self.assertIsInstance(lowercase , lowercase)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=lowercase) , 1_44_10)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Union[str, Any] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny')
self.assertIsInstance(lowercase , lowercase)
a__: Any = copy.deepcopy(model.config)
a__: Any = ['FunnelBaseModel']
a__: Union[str, Any] = TFAutoModel.from_config(lowercase)
self.assertIsInstance(lowercase , lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase)
a__: Union[str, Any] = TFAutoModel.from_pretrained(lowercase)
self.assertIsInstance(lowercase , lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
try:
AutoConfig.register('new-model' , lowercase)
a__: Optional[Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
with self.assertRaises(lowercase):
auto_class.register(lowercase , lowercase)
auto_class.register(lowercase , lowercase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase):
auto_class.register(lowercase , lowercase)
# Now that the config is registered, it can be used as any other config with the auto-API
a__: Optional[int] = BertModelTester(self).get_config()
a__: Tuple = NewModelConfig(**tiny_config.to_dict())
a__: int = auto_class.from_config(lowercase)
self.assertIsInstance(lowercase , lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase)
a__: Dict = auto_class.from_pretrained(lowercase)
self.assertIsInstance(lowercase , lowercase)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , 'bert-base is not a local folder and is not a valid model identifier'):
a__: str = TFAutoModel.from_pretrained('bert-base')
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
a__: List[str] = TFAutoModel.from_pretrained(lowercase , revision='aaaaaa')
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
a__: List[str] = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model')
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(lowercase , 'Use `from_pt=True` to load this model'):
a__: Union[str, Any] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
a__: Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
# With a sharded checkpoint
a__: Optional[int] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
with RequestCounter() as counter:
a__: List[str] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 290 | """simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowercase__ = logging.get_logger('transformers.models.encodec')
lowercase__ = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
lowercase__ = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
lowercase__ = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
lowercase__ = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
lowercase__ = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
lowercase__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowercase__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowercase__ = []
lowercase__ = []
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
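# Copy one tensor from the original checkpoint into the HF model: walk the dotted `key` path,
# check that shapes match, then assign the value to the attribute named by `weight_type`
# (weight, bias, LSTM gates, batch-norm statistics, ...).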
for attribute in key.split('.' ):
a__: str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
a__: List[str] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
a__: Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
a__: str = value
elif weight_type == "weight_g":
a__: int = value
elif weight_type == "weight_v":
a__: Tuple = value
elif weight_type == "bias":
a__: Dict = value
elif weight_type == "running_mean":
a__: Any = value
elif weight_type == "running_var":
a__: Tuple = value
elif weight_type == "num_batches_tracked":
a__: List[str] = value
elif weight_type == "weight_ih_l0":
a__: List[Any] = value
elif weight_type == "weight_hh_l0":
a__: List[Any] = value
elif weight_type == "bias_ih_l0":
a__: List[Any] = value
elif weight_type == "bias_hh_l0":
a__: List[Any] = value
elif weight_type == "weight_ih_l1":
a__: int = value
elif weight_type == "weight_hh_l1":
a__: str = value
elif weight_type == "bias_ih_l1":
a__: Union[str, Any] = value
elif weight_type == "bias_hh_l1":
a__: Any = value
else:
a__: Union[str, Any] = value
logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
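# Return True when `name` matches one of `ignore_keys`: a trailing '.*' is a prefix match,
# a '.*.' in the middle matches prefix and suffix, and anything else is a plain substring match.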
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
a__ , a__: Optional[Any] = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
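# Remap every tensor of the original EnCodec state dict onto the HF EncodecModel using the
# name mappings above; weights that cannot be matched are collected and reported as unused.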
a__: List[Any] = []
if model_name in ("encodec_24khz", "encodec_32khz"):
a__: Optional[int] = MAPPING_24K
elif model_name == "encodec_48khz":
a__: List[Any] = MAPPING_48K
else:
raise ValueError(F'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info(F'{name} was ignored' )
continue
a__: int = False
for key, mapped_key in MAPPING.items():
if "*" in key:
a__ , a__: str = key.split('.*.' )
if prefix in name and suffix in name:
a__: List[str] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
a__: List[str] = True
if "*" in mapped_key:
a__: List[str] = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
a__: str = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
a__: int = 'weight_g'
elif "weight_v" in name:
a__: Dict = 'weight_v'
elif "weight_ih_l0" in name:
a__: int = 'weight_ih_l0'
elif "weight_hh_l0" in name:
a__: Union[str, Any] = 'weight_hh_l0'
elif "bias_ih_l0" in name:
a__: Optional[Any] = 'bias_ih_l0'
elif "bias_hh_l0" in name:
a__: Optional[int] = 'bias_hh_l0'
elif "weight_ih_l1" in name:
a__: Dict = 'weight_ih_l1'
elif "weight_hh_l1" in name:
a__: Optional[Any] = 'weight_hh_l1'
elif "bias_ih_l1" in name:
a__: List[str] = 'bias_ih_l1'
elif "bias_hh_l1" in name:
a__: Optional[Any] = 'bias_hh_l1'
elif "bias" in name:
a__: List[str] = 'bias'
elif "weight" in name:
a__: Any = 'weight'
elif "running_mean" in name:
a__: Dict = 'running_mean'
elif "running_var" in name:
a__: Dict = 'running_var'
elif "num_batches_tracked" in name:
a__: Dict = 'num_batches_tracked'
else:
a__: List[str] = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F'Unused weights: {unused_weights}' )
@torch.no_grad()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) ->int:
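# End-to-end conversion: build or adapt the EncodecConfig for the requested variant, load the
# original checkpoint, remap its weights, then save (and optionally push) the model and feature extractor.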
if config_path is not None:
a__: Dict = EncodecConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
a__: Tuple = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
a__: Any = [8, 5, 4, 4]
a__: List[str] = [2.2]
a__: List[Any] = 64
a__: Dict = 32000
a__: Union[str, Any] = 2048
a__: Union[str, Any] = False
a__: Any = False
a__: Optional[Any] = False
elif model_name == "encodec_48khz":
a__: Optional[int] = [8, 5, 4, 2]
a__: Union[str, Any] = [3.0, 6.0, 12.0, 24.0]
a__: List[str] = 48000
a__: Tuple = 2
a__: Optional[Any] = False
a__: Optional[int] = 'time_group_norm'
a__: Union[str, Any] = True
a__: Dict = 1.0
a__: str = 0.01
else:
raise ValueError(F'Unknown model name: {model_name}' )
a__: Optional[int] = EncodecModel(_SCREAMING_SNAKE_CASE )
a__: List[str] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
a__: int = torch.load(_SCREAMING_SNAKE_CASE )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
a__: str = original_checkpoint['best_state']
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
lowercase__ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 290 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __snake_case :
@staticmethod
def lowerCamelCase_ ( *lowercase , **lowercase) -> Any:
'''simple docstring'''
pass
def __a ( _SCREAMING_SNAKE_CASE ) ->int:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowercase__ = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
a__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__: List[str] = pipeline(
'document-question-answering' , model=lowercase , tokenizer=lowercase , image_processor=lowercase)
a__: List[str] = INVOICE_URL
a__: Dict = list(zip(*apply_tesseract(load_image(lowercase) , lowercase , '')))
a__: str = 'What is the placebo?'
a__: List[Any] = [
{
'image': load_image(lowercase),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def lowerCamelCase_ ( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = dqa_pipeline(lowercase , top_k=2)
self.assertEqual(
lowercase , [
[
{'score': ANY(lowercase), 'answer': ANY(lowercase), 'start': ANY(lowercase), 'end': ANY(lowercase)},
{'score': ANY(lowercase), 'answer': ANY(lowercase), 'start': ANY(lowercase), 'end': ANY(lowercase)},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Dict = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2')
a__: List[Any] = INVOICE_URL
a__: List[str] = 'How many cats are there?'
a__: Union[str, Any] = [
{'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
a__: List[str] = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
self.assertEqual(nested_simplify(lowercase , decimals=4) , lowercase)
a__: Union[str, Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2)
self.assertEqual(nested_simplify(lowercase , decimals=4) , lowercase)
# No text is detected in this image, so layoutlmv2 should fail and return an empty answer.
a__: int = './tests/fixtures/tests_samples/COCO/000000039769.png'
a__: Dict = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
self.assertEqual(lowercase , [])
# We can optionally pass the words and bounding boxes directly
a__: Optional[int] = './tests/fixtures/tests_samples/COCO/000000039769.png'
a__: int = []
a__: Any = []
a__: Tuple = dqa_pipeline(image=lowercase , question=lowercase , words=lowercase , boxes=lowercase , top_k=2)
self.assertEqual(lowercase , [])
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: int = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
a__: int = INVOICE_URL
a__: Dict = 'What is the invoice number?'
a__: Tuple = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
a__: Optional[Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
a__: int = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
[
{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Any = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
a__: Union[str, Any] = INVOICE_URL
a__: List[str] = 'What is the invoice number?'
a__: Optional[int] = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
{'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
a__: Optional[int] = dqa_pipeline({'image': image, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
{'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
a__: Optional[Any] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
[
{'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: int = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowercase)
a__: Union[str, Any] = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowercase , revision='3dc6de3' , )
a__: str = INVOICE_URL
a__: Union[str, Any] = 'What is the invoice number?'
a__: Dict = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
a__: str = dqa_pipeline({'image': image, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
a__: List[str] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
[
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
a__: List[str] = list(zip(*apply_tesseract(load_image(lowercase) , lowercase , '')))
# This model should also work if `image` is set to None
a__: Any = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Union[str, Any] = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowercase)
a__: List[Any] = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowercase , revision='3dc6de3' , max_seq_len=50 , )
a__: int = INVOICE_URL
a__: Any = 'What is the invoice number?'
a__: Union[str, Any] = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
a__: List[str] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
[
{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
a__: Any = list(zip(*apply_tesseract(load_image(lowercase) , lowercase , '')))
# This model should also work if `image` is set to None
a__: Dict = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2)
self.assertEqual(
nested_simplify(lowercase , decimals=4) , [
{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Optional[Any] = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa') , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
a__: Optional[int] = INVOICE_URL
a__: Tuple = 'What is the invoice number?'
a__: str = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
self.assertEqual(nested_simplify(lowercase , decimals=4) , [{'answer': 'us-001'}])
@require_tf
@unittest.skip('Document question answering not implemented in TF')
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
pass
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
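# Classic recursive Towers of Hanoi: move the top height-1 disks to the spare pole,
# move the largest disk to the target pole, then move the height-1 disks back on top of it.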
if height >= 1:
move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
move_disk(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
move_tower(height - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
print('moving disk from' , _SCREAMING_SNAKE_CASE , 'to' , _SCREAMING_SNAKE_CASE )
def __a ( ) ->List[str]:
a__: Dict = int(input('Height of hanoi: ' ).strip() )
move_tower(_SCREAMING_SNAKE_CASE , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
| 290 | 1 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->str:
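# Convert a snake_case string to camelCase, or to PascalCase when the second flag is True,
# e.g. "some_random_string" -> "someRandomString" (camel) or "SomeRandomString" (Pascal).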
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: Optional[int] = F'Expected string as input, found {type(_SCREAMING_SNAKE_CASE )}'
raise ValueError(_SCREAMING_SNAKE_CASE )
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: List[str] = F'Expected boolean as use_pascal parameter, found {type(_SCREAMING_SNAKE_CASE )}'
raise ValueError(_SCREAMING_SNAKE_CASE )
a__: int = input_str.split('_' )
a__: List[str] = 0 if use_pascal else 1
a__: List[str] = words[start_index:]
a__: List[str] = [word[0].upper() + word[1:] for word in words_to_capitalize]
a__: List[str] = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 290 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = KandinskyInpaintPipeline
a__ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
a__ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
a__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a__ = False
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
return 1_00
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[int] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
torch.manual_seed(0)
a__: Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
a__: Optional[Any] = MultilingualCLIP(lowercase)
a__: int = text_encoder.eval()
return text_encoder
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = {
'in_channels': 9,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
a__: str = UNetaDConditionModel(**lowercase)
return model
@property
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = VQModel(**self.dummy_movq_kwargs)
return model
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Dict = self.dummy_text_encoder
a__: int = self.dummy_tokenizer
a__: str = self.dummy_unet
a__: Any = self.dummy_movq
a__: Tuple = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase , )
a__: Tuple = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase_ ( self , lowercase , lowercase=0) -> Any:
'''simple docstring'''
a__: List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase)).to(lowercase)
a__: int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowercase)
# create init_image
a__: Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase)).to(lowercase)
a__: int = image.cpu().permute(0 , 2 , 3 , 1)[0]
a__: Optional[int] = Image.fromarray(np.uinta(lowercase)).convert('RGB').resize((2_56, 2_56))
# create mask
a__: Tuple = np.ones((64, 64) , dtype=np.floataa)
a__: Optional[Any] = 0
if str(lowercase).startswith('mps'):
a__: str = torch.manual_seed(lowercase)
else:
a__: Dict = torch.Generator(device=lowercase).manual_seed(lowercase)
a__: Optional[int] = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = 'cpu'
a__: List[Any] = self.get_dummy_components()
a__: Optional[Any] = self.pipeline_class(**lowercase)
a__: str = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Optional[int] = pipe(**self.get_dummy_inputs(lowercase))
a__: List[str] = output.images
a__: int = pipe(
**self.get_dummy_inputs(lowercase) , return_dict=lowercase , )[0]
a__: Optional[Any] = image[0, -3:, -3:, -1]
a__: List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}')
assert image.shape == (1, 64, 64, 3)
a__: str = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy')
a__: int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
a__: Union[str, Any] = np.ones((7_68, 7_68) , dtype=np.floataa)
a__: int = 0
a__: Optional[int] = 'a hat'
a__: int = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa)
pipe_prior.to(lowercase)
a__: Any = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa)
a__: Optional[Any] = pipeline.to(lowercase)
pipeline.set_progress_bar_config(disable=lowercase)
a__: Dict = torch.Generator(device='cpu').manual_seed(0)
a__ , a__: Optional[Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
a__: List[str] = pipeline(
lowercase , image=lowercase , mask_image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
a__: str = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase , lowercase)
| 290 | """simple docstring"""
class __snake_case :
def __init__( self , lowercase , lowercase=None , lowercase=None) -> List[str]:
'''simple docstring'''
a__: Dict = data
a__: List[Any] = previous
a__: Any = next_node
def __str__( self) -> str:
'''simple docstring'''
return f'{self.data}'
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return self.data
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return self.next
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return self.previous
class __snake_case :
def __init__( self , lowercase) -> Dict:
'''simple docstring'''
a__: List[Any] = head
def __iter__( self) -> List[Any]:
'''simple docstring'''
return self
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
if not self.current:
raise StopIteration
else:
a__: Dict = self.current.get_data()
a__: Optional[Any] = self.current.get_next()
return value
class __snake_case :
def __init__( self) -> Dict:
'''simple docstring'''
a__: List[Any] = None # First node in list
a__: Optional[int] = None # Last node in list
def __str__( self) -> Optional[Any]:
'''simple docstring'''
a__: Dict = self.head
a__: Optional[Any] = []
while current is not None:
nodes.append(current.get_data())
a__: str = current.get_next()
return " ".join(str(lowercase) for node in nodes)
def __contains__( self , lowercase) -> Optional[int]:
'''simple docstring'''
a__: Optional[int] = self.head
while current:
if current.get_data() == value:
return True
a__: Dict = current.get_next()
return False
def __iter__( self) -> int:
'''simple docstring'''
return LinkedListIterator(self.head)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
if self.head is None:
a__: Optional[Any] = node
a__: Optional[Any] = node
else:
self.insert_before_node(self.head , lowercase)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
if self.head is None:
self.set_head(lowercase)
else:
self.insert_after_node(self.tail , lowercase)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: Tuple = Node(lowercase)
if self.head is None:
self.set_head(lowercase)
else:
self.set_tail(lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Union[str, Any] = node
a__: Optional[Any] = node.previous
if node.get_previous() is None:
a__: Tuple = node_to_insert
else:
a__: int = node_to_insert
a__: Optional[int] = node_to_insert
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Optional[int] = node
a__: Tuple = node.next
if node.get_next() is None:
a__: Optional[int] = node_to_insert
else:
a__: Any = node_to_insert
a__: str = node_to_insert
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
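# Insert the new value so that it ends up at the given 1-based position; if the list is
# shorter than that, the node is simply appended at the tail.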
a__: Any = 1
a__: Tuple = Node(lowercase)
a__: Tuple = self.head
while node:
if current_position == position:
self.insert_before_node(lowercase , lowercase)
return
current_position += 1
a__: List[Any] = node.next
self.insert_after_node(self.tail , lowercase)
def lowerCamelCase_ ( self , lowercase) -> Node:
'''simple docstring'''
a__: Tuple = self.head
while node:
if node.get_data() == item:
return node
a__: List[str] = node.get_next()
raise Exception('Node not found')
def lowerCamelCase_ ( self , lowercase) -> Any:
'''simple docstring'''
if (node := self.get_node(lowercase)) is not None:
if node == self.head:
a__: Any = self.head.get_next()
if node == self.tail:
a__: List[Any] = self.tail.get_previous()
self.remove_node_pointers(lowercase)
@staticmethod
def lowerCamelCase_ ( lowercase) -> None:
'''simple docstring'''
if node.get_next():
a__: Any = node.previous
if node.get_previous():
a__: List[str] = node.next
a__: int = None
a__: Union[str, Any] = None
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return self.head is None
def __a ( ) ->None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | 1 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , _SCREAMING_SNAKE_CASE )
a__: Union[str, Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
a__: List[str] = dataset_size < in_memory_max_size
else:
a__: Tuple = False
a__: Union[str, Any] = is_small_dataset(_SCREAMING_SNAKE_CASE )
assert result == expected
| 290 | """simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __snake_case ( __lowerCAmelCase ):
a__ = 42
a__ = jnp.floataa
a__ = True
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
super().setup()
a__: int = nn.Dense(5 , dtype=self.dtype)
def __call__( self , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
a__: Dict = super().__call__(*lowercase , **lowercase)
a__: str = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class __snake_case ( __lowerCAmelCase ):
a__ = FlaxBigBirdForNaturalQuestionsModule
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
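# Natural-Questions loss: mean one-hot cross entropy computed separately for the start-token,
# end-token and answer-category heads, then averaged over the three.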
def cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
a__: Any = logits.shape[-1]
a__: List[Any] = (labels[..., None] == jnp.arange(_SCREAMING_SNAKE_CASE )[None]).astype('f4' )
a__: List[str] = jax.nn.log_softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
a__: Dict = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
a__: str = reduction(_SCREAMING_SNAKE_CASE )
return loss
a__: Tuple = partial(_SCREAMING_SNAKE_CASE , reduction=jnp.mean )
a__: List[str] = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Union[str, Any] = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Any = cross_entropy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __snake_case :
a__ = "google/bigbird-roberta-base"
a__ = 3000
a__ = 1_0500
a__ = 128
a__ = 3
a__ = 1
a__ = 5
# tx_args
a__ = 3e-5
a__ = 0.0
a__ = 2_0000
a__ = 0.0095
a__ = "bigbird-roberta-natural-questions"
a__ = "training-expt"
a__ = "data/nq-training.jsonl"
a__ = "data/nq-validation.jsonl"
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=lowercase)
a__: str = os.path.join(self.base_dir , self.save_dir)
a__: List[str] = self.batch_size_per_device * jax.device_count()
@dataclass
class __snake_case :
a__ = 42
a__ = 4096 # no dynamic padding on TPUs
def __call__( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: int = self.collate_fn(lowercase)
a__: Optional[int] = jax.tree_util.tree_map(lowercase , lowercase)
return batch
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__ , a__: Dict = self.fetch_inputs(features['input_ids'])
a__: List[Any] = {
'input_ids': jnp.array(lowercase , dtype=jnp.intaa),
'attention_mask': jnp.array(lowercase , dtype=jnp.intaa),
'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa),
'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa),
'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa),
}
return batch
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
a__: List[Any] = [self._fetch_inputs(lowercase) for ids in input_ids]
return zip(*lowercase)
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__: Union[str, Any] = [1 for _ in range(len(lowercase))]
while len(lowercase) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
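# Yield the dataset in consecutive batches of `batch_size` examples, shuffling first when a
# seed is given; any trailing partial batch is dropped.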
if seed is not None:
a__: int = dataset.shuffle(seed=_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) // batch_size ):
a__: Union[str, Any] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_SCREAMING_SNAKE_CASE )
@partial(jax.pmap , axis_name='batch' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) ->Any:
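# One pmapped training step: compute the QA loss, take gradients, average loss and gradients
# across devices along the 'batch' axis, and apply the optimizer update.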
def loss_fn(_SCREAMING_SNAKE_CASE ):
a__: str = model_inputs.pop('start_labels' )
a__: Dict = model_inputs.pop('end_labels' )
a__: Optional[int] = model_inputs.pop('pooled_labels' )
a__: Optional[Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , dropout_rng=_SCREAMING_SNAKE_CASE , train=_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: Optional[int] = outputs
return state.loss_fn(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
a__ , a__: Union[str, Any] = jax.random.split(_SCREAMING_SNAKE_CASE )
a__: List[Any] = jax.value_and_grad(_SCREAMING_SNAKE_CASE )
a__ , a__: str = grad_fn(state.params )
a__: Optional[int] = jax.lax.pmean({'loss': loss} , axis_name='batch' )
a__: int = jax.lax.pmean(_SCREAMING_SNAKE_CASE , 'batch' )
a__: Union[str, Any] = state.apply_gradients(grads=_SCREAMING_SNAKE_CASE )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch' )
def __a ( _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) ->Optional[Any]:
a__: Optional[int] = model_inputs.pop('start_labels' )
a__: int = model_inputs.pop('end_labels' )
a__: Dict = model_inputs.pop('pooled_labels' )
a__: Union[str, Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=state.params , train=_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: int = outputs
a__: Optional[int] = state.loss_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Tuple = jax.lax.pmean({'loss': loss} , axis_name='batch' )
return metrics
class __snake_case ( train_state.TrainState ):
a__ = struct.field(pytree_node=__lowerCAmelCase )
@dataclass
class __snake_case :
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = None
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None) -> Optional[int]:
'''simple docstring'''
a__: Dict = model.params
a__: Any = TrainState.create(
apply_fn=model.__call__ , params=lowercase , tx=lowercase , loss_fn=lowercase , )
if ckpt_dir is not None:
a__ , a__ , a__ , a__ , a__: Any = restore_checkpoint(lowercase , lowercase)
a__: Any = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
a__ , a__: str = build_tx(**lowercase)
a__: Optional[Any] = train_state.TrainState(
step=lowercase , apply_fn=model.__call__ , params=lowercase , tx=lowercase , opt_state=lowercase , )
a__: int = args
a__: Union[str, Any] = data_collator
a__: Any = lr
a__: Dict = params
a__: Tuple = jax_utils.replicate(lowercase)
return state
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__: int = self.args
a__: str = len(lowercase) // args.batch_size
a__: Tuple = jax.random.PRNGKey(0)
a__: List[Any] = jax.random.split(lowercase , jax.device_count())
for epoch in range(args.max_epochs):
a__: str = jnp.array(0 , dtype=jnp.floataa)
a__: Tuple = get_batched_dataset(lowercase , args.batch_size , seed=lowercase)
a__: Optional[int] = 0
for batch in tqdm(lowercase , total=lowercase , desc=f'Running EPOCH-{epoch}'):
a__: List[str] = self.data_collator(lowercase)
a__ , a__ , a__: int = self.train_step_fn(lowercase , lowercase , **lowercase)
running_loss += jax_utils.unreplicate(metrics['loss'])
i += 1
if i % args.logging_steps == 0:
a__: List[Any] = jax_utils.unreplicate(state.step)
a__: Tuple = running_loss.item() / i
a__: Optional[Any] = self.scheduler_fn(state_step - 1)
a__: List[Any] = self.evaluate(lowercase , lowercase)
a__: List[str] = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(lowercase))
self.logger.log(lowercase , commit=lowercase)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__: Tuple = get_batched_dataset(lowercase , self.args.batch_size)
a__: Dict = len(lowercase) // self.args.batch_size
a__: Tuple = jnp.array(0 , dtype=jnp.floataa)
a__: List[Any] = 0
for batch in tqdm(lowercase , total=lowercase , desc='Evaluating ... '):
a__: str = self.data_collator(lowercase)
a__: List[str] = self.val_step_fn(lowercase , **lowercase)
running_loss += jax_utils.unreplicate(metrics['loss'])
i += 1
return running_loss / i
def lowerCamelCase_ ( self , lowercase , lowercase) -> Any:
'''simple docstring'''
a__: List[Any] = jax_utils.unreplicate(lowercase)
print(f'SAVING CHECKPOINT IN {save_dir}' , end=' ... ')
self.model_save_fn(lowercase , params=state.params)
with open(os.path.join(lowercase , 'opt_state.msgpack') , 'wb') as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(lowercase , 'args.joblib'))
joblib.dump(self.data_collator , os.path.join(lowercase , 'data_collator.joblib'))
with open(os.path.join(lowercase , 'training_state.json') , 'w') as f:
json.dump({'step': state.step.item()} , lowercase)
print('DONE')
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=' ... ' )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'flax_model.msgpack' ) , 'rb' ) as f:
a__: int = from_bytes(state.params , f.read() )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'opt_state.msgpack' ) , 'rb' ) as f:
a__: Optional[Any] = from_bytes(state.opt_state , f.read() )
a__: Optional[Any] = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , 'args.joblib' ) )
a__: int = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , 'data_collator.joblib' ) )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'training_state.json' ) , 'r' ) as f:
a__: Any = json.load(_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
a__: str = num_train_steps - warmup_steps
a__: str = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=_SCREAMING_SNAKE_CASE , transition_steps=_SCREAMING_SNAKE_CASE )
a__: List[Any] = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=1e-7 , transition_steps=_SCREAMING_SNAKE_CASE )
a__: int = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
def weight_decay_mask(_SCREAMING_SNAKE_CASE ):
a__: List[Any] = traverse_util.flatten_dict(_SCREAMING_SNAKE_CASE )
a__: List[str] = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(_SCREAMING_SNAKE_CASE )
a__: List[str] = scheduler_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Any = optax.adamw(learning_rate=_SCREAMING_SNAKE_CASE , weight_decay=_SCREAMING_SNAKE_CASE , mask=_SCREAMING_SNAKE_CASE )
return tx, lr
| 290 | 1 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir('fixtures/test_sentencepiece.model')
lowercase__ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
lowercase__ = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = CamembertTokenizer
a__ = CamembertTokenizerFast
a__ = True
a__ = True
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a__: Tuple = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = '<pad>'
a__: List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: str = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>NOTUSED')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , '<mask>')
self.assertEqual(len(lowercase) , 10_04)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_05)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Optional[Any] = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
a__: List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
a__: Dict = 'I was born in 92000, and this is falsé.'
a__: Optional[int] = tokenizer.encode(lowercase)
a__: Any = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Optional[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
# <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
a__: Tuple = tokenizer.convert_ids_to_tokens(lowercase)
a__: Tuple = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__: Dict = self.get_tokenizer()
a__: str = self.get_rust_tokenizer()
a__: int = 'I was born in 92000, and this is falsé.'
a__: Optional[Any] = tokenizer.tokenize(lowercase)
a__: List[Any] = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: str = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Tuple = self.get_rust_tokenizer()
a__: Union[str, Any] = tokenizer.encode(lowercase)
a__: List[Any] = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Union[str, Any] = {'input_ids': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# Camembert is a French model, so we also use French texts.
a__: int = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=lowercase , )
| 290 | """simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowercase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return image
elif isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
a__: Optional[int] = [image]
a__: str = [trans(img.convert('RGB' ) ) for img in image]
a__: Any = torch.stack(_SCREAMING_SNAKE_CASE )
return image
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
a__: Dict = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=lowercase , scheduler=lowercase)
def lowerCamelCase_ ( self , lowercase) -> int:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}')
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: int = min(int(num_inference_steps * strength) , lowercase)
a__: Any = max(num_inference_steps - init_timestep , 0)
a__: Union[str, Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None) -> List[Any]:
'''simple docstring'''
if not isinstance(lowercase , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase)}')
a__: Tuple = image.to(device=lowercase , dtype=lowercase)
if isinstance(lowercase , lowercase) and len(lowercase) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(lowercase)}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
a__: List[str] = init_latents.shape
a__: List[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase)
# get latents
print('add noise to latents at timestep' , lowercase)
a__: int = self.scheduler.add_noise(lowercase , lowercase , lowercase)
a__: Dict = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase = None , lowercase = 0.8 , lowercase = 1 , lowercase = None , lowercase = 0.0 , lowercase = 50 , lowercase = None , lowercase = "pil" , lowercase = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowercase)
# 2. Preprocess image
a__: Tuple = preprocess(lowercase)
# 3. set timesteps
self.scheduler.set_timesteps(lowercase , device=self.device)
a__ , a__: Union[str, Any] = self.get_timesteps(lowercase , lowercase , self.device)
a__: Optional[int] = timesteps[:1].repeat(lowercase)
# 4. Prepare latent variables
a__: Union[str, Any] = self.prepare_latents(lowercase , lowercase , lowercase , self.unet.dtype , self.device , lowercase)
a__: Optional[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase):
# 1. predict noise model_output
a__: Dict = self.unet(lowercase , lowercase).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
a__: Optional[Any] = self.scheduler.step(
lowercase , lowercase , lowercase , eta=lowercase , use_clipped_model_output=lowercase , generator=lowercase , ).prev_sample
a__: Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1)
a__: Optional[int] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
a__: Dict = self.numpy_to_pil(lowercase)
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase)
| 290 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __snake_case ( __lowerCAmelCase ):
a__ = 42
a__ = 42
def __init__( self , lowercase , lowercase) -> Any:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase , scheduler=lowercase)
@torch.no_grad()
def __call__( self , lowercase = 1 , lowercase = 20_00 , lowercase = None , lowercase = "pil" , lowercase = True , **lowercase , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
a__: Optional[int] = self.unet.config.sample_size
a__: Dict = (batch_size, 3, img_size, img_size)
a__: Dict = self.unet
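# start from Gaussian noise scaled by the scheduler's initial noise sigma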
a__: Optional[int] = randn_tensor(lowercase , generator=lowercase) * self.scheduler.init_noise_sigma
a__: str = sample.to(self.device)
self.scheduler.set_timesteps(lowercase)
self.scheduler.set_sigmas(lowercase)
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
a__: List[str] = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device)
# correction step
for _ in range(self.scheduler.config.correct_steps):
a__: Optional[int] = self.unet(lowercase , lowercase).sample
a__: List[str] = self.scheduler.step_correct(lowercase , lowercase , generator=lowercase).prev_sample
# prediction step
a__: List[Any] = model(lowercase , lowercase).sample
a__: Optional[Any] = self.scheduler.step_pred(lowercase , lowercase , lowercase , generator=lowercase)
a__ , a__: List[str] = output.prev_sample, output.prev_sample_mean
a__: int = sample_mean.clamp(0 , 1)
a__: Any = sample.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
a__: Dict = self.numpy_to_pil(lowercase)
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowercase)
| 290 | """simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Optional[Any] = tempfile.mkdtemp()
a__: Optional[int] = SamImageProcessor()
a__: Tuple = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> List[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__: Optional[int] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__: List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Union[str, Any] = self.get_image_processor()
a__: List[Any] = SamProcessor(image_processor=lowercase)
a__: Optional[int] = self.prepare_image_inputs()
a__: Optional[Any] = image_processor(lowercase , return_tensors='np')
a__: Tuple = processor(images=lowercase , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_torch
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: int = self.get_image_processor()
a__: List[str] = SamProcessor(image_processor=lowercase)
a__: Optional[Any] = [torch.ones((1, 3, 5, 5))]
a__: Union[str, Any] = [[17_64, 26_46]]
a__: Optional[Any] = [[6_83, 10_24]]
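# post_process_masks upsamples the low-resolution masks back to the original image sizes (here 1764 x 2646)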
a__: int = processor.post_process_masks(lowercase , lowercase , lowercase)
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Optional[int] = processor.post_process_masks(
lowercase , torch.tensor(lowercase) , torch.tensor(lowercase))
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
# should also work with np
a__: Dict = [np.ones((1, 3, 5, 5))]
a__: Tuple = processor.post_process_masks(lowercase , np.array(lowercase) , np.array(lowercase))
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Tuple = [[1, 0], [0, 1]]
with self.assertRaises(lowercase):
a__: List[Any] = processor.post_process_masks(lowercase , np.array(lowercase) , np.array(lowercase))
@require_vision
@require_tf
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = tempfile.mkdtemp()
a__: List[Any] = SamImageProcessor()
a__: Optional[int] = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> int:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Optional[int] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__: Dict = self.get_image_processor(do_normalize=lowercase , padding_value=1.0)
a__: Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[Any] = self.get_image_processor()
a__: str = SamProcessor(image_processor=lowercase)
a__: int = self.prepare_image_inputs()
a__: int = image_processor(lowercase , return_tensors='np')
a__: Dict = processor(images=lowercase , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_tf
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Tuple = self.get_image_processor()
a__: Any = SamProcessor(image_processor=lowercase)
a__: str = [tf.ones((1, 3, 5, 5))]
a__: List[Any] = [[17_64, 26_46]]
a__: List[Any] = [[6_83, 10_24]]
a__: List[Any] = processor.post_process_masks(lowercase , lowercase , lowercase , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: Tuple = processor.post_process_masks(
lowercase , tf.convert_to_tensor(lowercase) , tf.convert_to_tensor(lowercase) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
# should also work with np
a__: Optional[Any] = [np.ones((1, 3, 5, 5))]
a__: int = processor.post_process_masks(
lowercase , np.array(lowercase) , np.array(lowercase) , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46))
a__: List[str] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError):
a__: Any = processor.post_process_masks(
lowercase , np.array(lowercase) , np.array(lowercase) , return_tensors='tf')
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: str = tempfile.mkdtemp()
a__: int = SamImageProcessor()
a__: Union[str, Any] = SamProcessor(lowercase)
processor.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self , **lowercase) -> Optional[int]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase).image_processor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
a__: Any = [Image.fromarray(np.moveaxis(lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[int] = self.get_image_processor()
a__: int = SamProcessor(image_processor=lowercase)
a__: int = np.random.randint(0 , 2 , size=(1, 3, 5, 5)).astype(np.floataa)
a__: Dict = [tf.convert_to_tensor(lowercase)]
a__: Union[str, Any] = [torch.tensor(lowercase)]
a__: List[Any] = [[17_64, 26_46]]
a__: Optional[Any] = [[6_83, 10_24]]
a__: Tuple = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='tf')
a__: str = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='pt')
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Tuple = self.get_image_processor()
a__: Dict = SamProcessor(image_processor=lowercase)
a__: Any = self.prepare_image_inputs()
a__: List[Any] = image_processor(lowercase , return_tensors='pt')['pixel_values'].numpy()
a__: Tuple = processor(images=lowercase , return_tensors='pt')['pixel_values'].numpy()
a__: Any = image_processor(lowercase , return_tensors='tf')['pixel_values'].numpy()
a__: Any = processor(images=lowercase , return_tensors='tf')['pixel_values'].numpy()
self.assertTrue(np.allclose(lowercase , lowercase))
self.assertTrue(np.allclose(lowercase , lowercase))
self.assertTrue(np.allclose(lowercase , lowercase))
| 290 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = UnCLIPImageVariationPipeline
a__ = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
a__ = IMAGE_VARIATION_BATCH_PARAMS
a__ = [
"""generator""",
"""return_dict""",
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
a__ = False
@property
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return 1_00
@property
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
torch.manual_seed(0)
a__: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(lowercase)
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: List[str] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(lowercase)
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
torch.manual_seed(0)
a__: List[Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
a__: List[Any] = UnCLIPTextProjModel(**lowercase)
return model
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
a__: Optional[Any] = UNetaDConditionModel(**lowercase)
return model
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
torch.manual_seed(0)
a__: int = UNetaDModel(**self.dummy_super_res_kwargs)
return model
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
torch.manual_seed(1)
a__: Any = UNetaDModel(**self.dummy_super_res_kwargs)
return model
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: List[str] = self.dummy_decoder
a__: str = self.dummy_text_proj
a__: Dict = self.dummy_text_encoder
a__: Dict = self.dummy_tokenizer
a__: Optional[int] = self.dummy_super_res_first
a__: Dict = self.dummy_super_res_last
a__: Tuple = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=10_00 , )
a__: Dict = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=10_00 , )
a__: Optional[int] = CLIPImageProcessor(crop_size=32 , size=32)
a__: int = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowerCamelCase_ ( self , lowercase , lowercase=0 , lowercase=True) -> Optional[int]:
'''simple docstring'''
a__: Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase)).to(lowercase)
if str(lowercase).startswith('mps'):
a__: Dict = torch.manual_seed(lowercase)
else:
a__: str = torch.Generator(device=lowercase).manual_seed(lowercase)
if pil_image:
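# rescale the tensor from [-1, 1] to [0, 1] and convert it to a PIL image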
a__: List[Any] = input_image * 0.5 + 0.5
a__: Any = input_image.clamp(0 , 1)
a__: Tuple = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
a__: Tuple = DiffusionPipeline.numpy_to_pil(lowercase)[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Optional[Any] = 'cpu'
a__: Optional[int] = self.get_dummy_components()
a__: Dict = self.pipeline_class(**lowercase)
a__: str = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Optional[Any] = self.get_dummy_inputs(lowercase , pil_image=lowercase)
a__: str = pipe(**lowercase)
a__: str = output.images
a__: List[str] = self.get_dummy_inputs(lowercase , pil_image=lowercase)
a__: Optional[int] = pipe(
**lowercase , return_dict=lowercase , )[0]
a__: Optional[Any] = image[0, -3:, -3:, -1]
a__: Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__: List[str] = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: str = 'cpu'
a__: int = self.get_dummy_components()
a__: Union[str, Any] = self.pipeline_class(**lowercase)
a__: int = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: int = self.get_dummy_inputs(lowercase , pil_image=lowercase)
a__: str = pipe(**lowercase)
a__: Dict = output.images
a__: List[str] = self.get_dummy_inputs(lowercase , pil_image=lowercase)
a__: Any = pipe(
**lowercase , return_dict=lowercase , )[0]
a__: str = image[0, -3:, -3:, -1]
a__: Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__: str = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Optional[int] = 'cpu'
a__: List[str] = self.get_dummy_components()
a__: Dict = self.pipeline_class(**lowercase)
a__: Any = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Optional[Any] = self.get_dummy_inputs(lowercase , pil_image=lowercase)
a__: str = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
a__: Any = pipe(**lowercase)
a__: str = output.images
a__: Union[str, Any] = self.get_dummy_inputs(lowercase , pil_image=lowercase)
a__: Optional[Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
a__: Tuple = pipe(
**lowercase , return_dict=lowercase , )[0]
a__: Optional[Any] = image[0, -3:, -3:, -1]
a__: Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
a__: str = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Optional[Any] = torch.device('cpu')
class __snake_case :
a__ = 1
a__: Optional[int] = self.get_dummy_components()
a__: List[str] = self.pipeline_class(**lowercase)
a__: List[Any] = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Tuple = torch.Generator(device=lowercase).manual_seed(0)
a__: Any = pipe.decoder.dtype
a__: Dict = 1
a__: int = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
a__: List[Any] = pipe.prepare_latents(
lowercase , dtype=lowercase , device=lowercase , generator=lowercase , latents=lowercase , scheduler=DummyScheduler())
a__: str = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
a__: Tuple = pipe.prepare_latents(
lowercase , dtype=lowercase , device=lowercase , generator=lowercase , latents=lowercase , scheduler=DummyScheduler())
a__: Dict = self.get_dummy_inputs(lowercase , pil_image=lowercase)
a__: Optional[Any] = pipe(
**lowercase , decoder_latents=lowercase , super_res_latents=lowercase).images
a__: int = self.get_dummy_inputs(lowercase , pil_image=lowercase)
# Don't pass image, instead pass embedding
a__: Tuple = pipeline_inputs.pop('image')
a__: int = pipe.image_encoder(lowercase).image_embeds
a__: Union[str, Any] = pipe(
**lowercase , decoder_latents=lowercase , super_res_latents=lowercase , image_embeddings=lowercase , ).images
# make sure passing image embeddings manually gives identical results
assert np.abs(img_out_a - img_out_a).max() < 1e-4
@skip_mps
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: List[Any] = torch_device == 'cpu'
# Check is relaxed because there is no torch 2.0 sliced attention added kv processor
a__: Any = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase , expected_max_diff=lowercase)
@skip_mps
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Any = torch_device == 'cpu'
a__: Any = True
a__: str = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=lowercase , relax_max_difference=lowercase , additional_params_copy_to_batched_inputs=lowercase , )
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: int = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
a__: Union[str, Any] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=lowercase , additional_params_copy_to_batched_inputs=lowercase , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=lowercase)
@skip_mps
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png')
a__: int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy')
a__: Optional[Any] = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa)
a__: Dict = pipeline.to(lowercase)
pipeline.set_progress_bar_config(disable=lowercase)
a__: Dict = torch.Generator(device='cpu').manual_seed(0)
a__: Union[str, Any] = pipeline(
lowercase , generator=lowercase , output_type='np' , )
a__: int = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert_mean_pixel_difference(lowercase , lowercase , 15)
| 290 | """simple docstring"""
from math import pow, sqrt
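# Graham's law of effusion: the rate of effusion of a gas is inversely proportional to the square root of its molar mass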
def __a ( *_SCREAMING_SNAKE_CASE ) ->bool:
a__: Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) > 0 and all(value > 0.0 for value in values )
return result
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError('Input Error: Molar mass values must greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
| 290 | 1 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowercase__ = get_logger(__name__)
lowercase__ = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class __snake_case :
@add_start_docstrings(lowercase)
def __call__( self , lowercase , lowercase) -> jnp.ndarray:
'''simple docstring'''
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
class __snake_case :
@add_start_docstrings(lowercase)
def __call__( self , lowercase , lowercase) -> jnp.ndarray:
'''simple docstring'''
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
class __snake_case ( __lowerCAmelCase ):
@add_start_docstrings(lowercase)
def __call__( self , lowercase , lowercase , lowercase , **lowercase) -> jnp.ndarray:
'''simple docstring'''
for processor in self:
a__: Union[str, Any] = inspect.signature(processor.__call__).parameters
if len(lowercase) > 3:
if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
raise ValueError(
f'Make sure that all the required parameters: {list(function_args.keys())} for '
f'{processor.__class__} are passed to the logits processor.')
a__: List[str] = processor(lowercase , lowercase , lowercase , **lowercase)
else:
a__: int = processor(lowercase , lowercase , lowercase)
return scores
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase) -> Dict:
'''simple docstring'''
if not isinstance(lowercase , lowercase) or not (temperature > 0):
raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}')
a__: Any = temperature
def __call__( self , lowercase , lowercase , lowercase) -> jnp.ndarray:
'''simple docstring'''
a__: Tuple = scores / self.temperature
return scores
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase = -float('Inf') , lowercase = 1) -> int:
'''simple docstring'''
if not isinstance(lowercase , lowercase) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}')
if not isinstance(lowercase , lowercase) or (min_tokens_to_keep < 1):
raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}')
a__: List[str] = top_p
a__: Dict = filter_value
a__: int = min_tokens_to_keep
def __call__( self , lowercase , lowercase , lowercase) -> jnp.ndarray:
'''simple docstring'''
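# sort the full vocabulary by score (top_k with k = vocab size) and keep the smallest prefix whose cumulative probability reaches top_p, including the crossing token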
a__ , a__: Tuple = lax.top_k(lowercase , scores.shape[-1])
a__: Optional[int] = jnp.full_like(lowercase , self.filter_value)
a__: List[Any] = jax.nn.softmax(lowercase , axis=-1).cumsum(axis=-1)
a__: Union[str, Any] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
a__: str = jnp.roll(lowercase , 1)
score_mask |= score_mask.at[:, 0].set(lowercase)
# min tokens to keep
a__: int = score_mask.at[:, : self.min_tokens_to_keep].set(lowercase)
a__: Any = jnp.where(lowercase , lowercase , lowercase)
a__: List[Any] = jax.lax.sort_key_val(lowercase , lowercase)[-1]
return next_scores
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase = -float('Inf') , lowercase = 1) -> Tuple:
'''simple docstring'''
if not isinstance(lowercase , lowercase) or top_k <= 0:
raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}')
a__: Dict = max(lowercase , lowercase)
a__: Any = filter_value
def __call__( self , lowercase , lowercase , lowercase) -> jnp.ndarray:
'''simple docstring'''
a__ , a__: str = scores.shape
a__: Any = jnp.full(batch_size * vocab_size , self.filter_value)
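# work on a flattened (batch * vocab) view: a per-row index shift lets the kept top-k scores be written back with a single scatter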
a__: Tuple = min(self.top_k , scores.shape[-1]) # Safety check
a__ , a__: List[Any] = lax.top_k(lowercase , lowercase)
a__: Union[str, Any] = jnp.broadcast_to((jnp.arange(lowercase) * vocab_size)[:, None] , (batch_size, topk)).flatten()
a__: Tuple = topk_scores.flatten()
a__: Optional[int] = topk_indices.flatten() + shift
a__: Union[str, Any] = next_scores_flat.at[topk_indices_flat].set(lowercase)
a__: Optional[Any] = next_scores_flat.reshape(lowercase , lowercase)
return next_scores
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase) -> Any:
'''simple docstring'''
a__: int = bos_token_id
def __call__( self , lowercase , lowercase , lowercase) -> jnp.ndarray:
'''simple docstring'''
a__: str = jnp.full(scores.shape , -float('inf'))
a__: Tuple = 1 - jnp.bool_(cur_len - 1)
a__: Union[str, Any] = jnp.where(lowercase , new_scores.at[:, self.bos_token_id].set(0) , lowercase)
return scores
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: Dict = max_length
a__: List[Any] = eos_token_id
def __call__( self , lowercase , lowercase , lowercase) -> jnp.ndarray:
'''simple docstring'''
a__: List[str] = jnp.full(scores.shape , -float('inf'))
a__: List[str] = 1 - jnp.bool_(cur_len - self.max_length + 1)
a__: str = jnp.where(lowercase , new_scores.at[:, self.eos_token_id].set(0) , lowercase)
return scores
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
if not isinstance(lowercase , lowercase) or min_length < 0:
raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}')
if not isinstance(lowercase , lowercase) or eos_token_id < 0:
raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}')
a__: List[Any] = min_length
a__: Union[str, Any] = eos_token_id
def __call__( self , lowercase , lowercase , lowercase) -> jnp.ndarray:
'''simple docstring'''
a__: Dict = 1 - jnp.clip(cur_len - self.min_length , 0 , 1)
a__: Any = jnp.where(lowercase , scores.at[:, self.eos_token_id].set(-float('inf')) , lowercase)
return scores
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase) -> Tuple:
'''simple docstring'''
a__: Union[str, Any] = list(lowercase)
a__: Optional[Any] = begin_index
def __call__( self , lowercase , lowercase , lowercase) -> Tuple:
'''simple docstring'''
a__: Dict = 1 - jnp.bool_(cur_len - self.begin_index)
a__: Union[str, Any] = jnp.where(lowercase , scores.at[:, self.begin_suppress_tokens].set(-float('inf')) , lowercase)
return scores
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: Dict = list(lowercase)
def __call__( self , lowercase , lowercase , lowercase) -> jnp.ndarray:
'''simple docstring'''
a__: Dict = scores.at[..., self.suppress_tokens].set(-float('inf'))
return scores
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase) -> str:
'''simple docstring'''
a__: int = dict(lowercase)
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
a__: Dict = jnp.ones((max(force_token_map.keys()) + 1) , dtype=jnp.intaa) * -1
for index, token in force_token_map.items():
if token is not None:
a__: List[str] = force_token_array.at[index].set(lowercase)
a__: Union[str, Any] = jnp.intaa(lowercase)
def __call__( self , lowercase , lowercase , lowercase) -> jnp.ndarray:
'''simple docstring'''
def _force_token(lowercase):
a__: str = scores.shape[0]
a__: Union[str, Any] = self.force_token_array[generation_idx]
a__: Any = jnp.ones_like(lowercase , dtype=scores.dtype) * -float('inf')
a__: str = jnp.zeros((batch_size, 1) , dtype=scores.dtype)
a__: Any = lax.dynamic_update_slice(lowercase , lowercase , (0, current_token))
return new_scores
a__: List[str] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(lowercase) , lambda: scores , ) , )
return scores
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
a__: List[str] = generate_config.eos_token_id
a__: int = generate_config.no_timestamps_token_id
a__: List[str] = generate_config.no_timestamps_token_id + 1
a__: Any = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowercase , 'max_initial_timestamp_index'):
a__: Tuple = generate_config.max_initial_timestamp_index
else:
a__: Optional[Any] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
a__: str = model_config.vocab_size
def __call__( self , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
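# suppress the no-timestamps token so generation always produces timestamp tokens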
a__: Optional[int] = scores.at[:, self.no_timestamps_token_id].set(-float('inf'))
def handle_pairs(lowercase , lowercase):
a__: List[str] = jnp.where((cur_len - self.begin_index) >= 1 , lowercase , lowercase)
a__: Dict = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , lowercase , )
a__: Dict = jnp.where((cur_len - self.begin_index) < 2 , lowercase , lowercase)
a__: Optional[int] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , lowercase , lowercase , )
return jnp.where(
lowercase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf')) , scores_k.at[: self.eos_token_id].set(-float('inf')) , ) , lowercase , )
a__: Tuple = jax.vmap(lowercase)(lowercase , lowercase)
a__: Optional[int] = jnp.where(cur_len == self.begin_index , lowercase , lowercase)
a__: Dict = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , lowercase , )
a__: str = self.timestamp_begin + self.max_initial_timestamp_index
a__: Union[str, Any] = jnp.where(
lowercase , scores.at[:, last_allowed + 1 :].set(-float('inf')) , lowercase , )
# if sum of probability over timestamps is above any other token, sample timestamp
a__: List[str] = jax.nn.log_softmax(lowercase , axis=-1)
def handle_cumulative_probs(lowercase , lowercase):
a__: Any = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1)
a__: int = jnp.max(logprobs_k[: self.timestamp_begin])
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf')) , lowercase , )
a__: List[Any] = jax.vmap(lowercase)(lowercase , lowercase)
return scores
| 290 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class __snake_case ( __lowerCAmelCase ):
a__ = """roberta-prelayernorm"""
def __init__( self , lowercase=5_02_65 , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=None , **lowercase , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
a__: Union[str, Any] = vocab_size
a__: str = hidden_size
a__: Tuple = num_hidden_layers
a__: List[str] = num_attention_heads
a__: Dict = hidden_act
a__: int = intermediate_size
a__: Tuple = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: Tuple = max_position_embeddings
a__: Tuple = type_vocab_size
a__: Optional[Any] = initializer_range
a__: Tuple = layer_norm_eps
a__: Optional[int] = position_embedding_type
a__: Any = use_cache
a__: Dict = classifier_dropout
class __snake_case ( __lowerCAmelCase ):
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
a__: str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__: Union[str, Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 290 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = KandinskyVaaPipeline
a__ = [
"""image_embeds""",
"""negative_image_embeds""",
]
a__ = ["""image_embeds""", """negative_image_embeds"""]
a__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a__ = False
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return 1_00
@property
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: str = {
'in_channels': 4,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
a__: Any = UNetaDConditionModel(**lowercase)
return model
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = VQModel(**self.dummy_movq_kwargs)
return model
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Any = self.dummy_unet
a__: Any = self.dummy_movq
a__: Dict = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase , )
a__: List[str] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase_ ( self , lowercase , lowercase=0) -> int:
'''simple docstring'''
a__: Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase)).to(lowercase)
a__: Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowercase)
if str(lowercase).startswith('mps'):
a__: Tuple = torch.manual_seed(lowercase)
else:
a__: Union[str, Any] = torch.Generator(device=lowercase).manual_seed(lowercase)
a__: Optional[Any] = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Any = 'cpu'
a__: Dict = self.get_dummy_components()
a__: Any = self.pipeline_class(**lowercase)
a__: str = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Any = pipe(**self.get_dummy_inputs(lowercase))
a__: Optional[Any] = output.images
a__: Dict = pipe(
**self.get_dummy_inputs(lowercase) , return_dict=lowercase , )[0]
a__: int = image[0, -3:, -3:, -1]
a__: List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__: Dict = np.array(
[0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy')
a__: List[str] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa)
pipe_prior.to(lowercase)
a__: Tuple = KandinskyVaaPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa)
a__: Dict = pipeline.to(lowercase)
pipeline.set_progress_bar_config(disable=lowercase)
a__: List[Any] = 'red cat, 4k photo'
a__: Dict = torch.Generator(device='cuda').manual_seed(0)
a__ , a__: Optional[Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
a__: Any = torch.Generator(device='cuda').manual_seed(0)
a__: str = pipeline(
image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=1_00 , output_type='np' , )
a__: Optional[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(lowercase , lowercase)
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __snake_case ( __lowerCAmelCase ):
a__ = """audio-spectrogram-transformer"""
def __init__( self , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-12 , lowercase=16 , lowercase=True , lowercase=10 , lowercase=10 , lowercase=10_24 , lowercase=1_28 , **lowercase , ) -> str:
'''simple docstring'''
super().__init__(**lowercase)
a__: Any = hidden_size
a__: int = num_hidden_layers
a__: Union[str, Any] = num_attention_heads
a__: Any = intermediate_size
a__: Union[str, Any] = hidden_act
a__: int = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: str = initializer_range
a__: Tuple = layer_norm_eps
a__: Any = patch_size
a__: int = qkv_bias
a__: Optional[Any] = frequency_stride
a__: int = time_stride
a__: List[str] = max_length
a__: Tuple = num_mel_bins
| 290 | 1 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
@register_to_config
def __init__( self , lowercase = 1_28 , lowercase = 2_56 , lowercase = 2000.0 , lowercase = 7_68 , lowercase = 12 , lowercase = 12 , lowercase = 64 , lowercase = 20_48 , lowercase = 0.1 , ) -> int:
'''simple docstring'''
super().__init__()
a__: Dict = nn.Sequential(
nn.Linear(lowercase , d_model * 4 , bias=lowercase) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowercase) , nn.SiLU() , )
a__: Any = nn.Embedding(lowercase , lowercase)
a__: Dict = False
a__: Optional[int] = nn.Linear(lowercase , lowercase , bias=lowercase)
a__: Tuple = nn.Dropout(p=lowercase)
a__: Union[str, Any] = nn.ModuleList()
for lyr_num in range(lowercase):
# FiLM conditional T5 decoder
a__: Optional[Any] = DecoderLayer(d_model=lowercase , d_kv=lowercase , num_heads=lowercase , d_ff=lowercase , dropout_rate=lowercase)
self.decoders.append(lowercase)
a__: List[Any] = TaLayerNorm(lowercase)
a__: Optional[int] = nn.Dropout(p=lowercase)
a__: str = nn.Linear(lowercase , lowercase , bias=lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__: List[str] = torch.mul(query_input.unsqueeze(-1) , key_input.unsqueeze(-2))
return mask.unsqueeze(-3)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__ , a__ , a__: int = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
a__: Tuple = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype)
a__: Any = self.conditioning_emb(lowercase).unsqueeze(1)
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
a__: List[Any] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
a__: List[str] = torch.broadcast_to(
torch.arange(lowercase , device=decoder_input_tokens.device) , (batch, seq_length) , )
a__: Union[str, Any] = self.position_encoding(lowercase)
a__: Tuple = self.continuous_inputs_projection(lowercase)
inputs += position_encodings
a__: Tuple = self.dropout(lowercase)
# decoder: No padding present.
a__: Optional[int] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype)
# Translate encoding masks to encoder-decoder masks.
a__: List[str] = [(x, self.encoder_decoder_mask(lowercase , lowercase)) for x, y in encodings_and_masks]
# cross attend style: concat encodings
a__: Any = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1)
a__: int = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1)
for lyr in self.decoders:
a__: str = lyr(
lowercase , conditioning_emb=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , )[0]
a__: Dict = self.decoder_norm(lowercase)
a__: Dict = self.post_dropout(lowercase)
a__: Union[str, Any] = self.spec_out(lowercase)
return spec_out
class __snake_case ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=1e-6) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
a__: Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowercase , d_kv=lowercase , num_heads=lowercase , dropout_rate=lowercase))
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowercase , d_kv=lowercase , num_heads=lowercase , dropout_rate=lowercase , layer_norm_epsilon=lowercase , ))
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowercase , d_ff=lowercase , dropout_rate=lowercase , layer_norm_epsilon=lowercase))
def lowerCamelCase_ ( self , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ) -> List[Any]:
'''simple docstring'''
a__: List[Any] = self.layer[0](
lowercase , conditioning_emb=lowercase , attention_mask=lowercase , )
if encoder_hidden_states is not None:
a__: Any = torch.where(encoder_attention_mask > 0 , 0 , -1e10).to(
encoder_hidden_states.dtype)
a__: Tuple = self.layer[1](
lowercase , key_value_states=lowercase , attention_mask=lowercase , )
# Apply Film Conditional Feed Forward layer
a__: Optional[int] = self.layer[-1](lowercase , lowercase)
return (hidden_states,)
class __snake_case ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
super().__init__()
a__: List[Any] = TaLayerNorm(lowercase)
a__: Union[str, Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase)
a__: Union[str, Any] = Attention(query_dim=lowercase , heads=lowercase , dim_head=lowercase , out_bias=lowercase , scale_qk=lowercase)
a__: str = nn.Dropout(lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase=None , lowercase=None , ) -> Any:
'''simple docstring'''
a__: int = self.layer_norm(lowercase)
if conditioning_emb is not None:
a__: int = self.FiLMLayer(lowercase , lowercase)
# Self-attention block
a__: str = self.attention(lowercase)
a__: Tuple = hidden_states + self.dropout(lowercase)
return hidden_states
class __snake_case ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
super().__init__()
a__: Any = Attention(query_dim=lowercase , heads=lowercase , dim_head=lowercase , out_bias=lowercase , scale_qk=lowercase)
a__: Union[str, Any] = TaLayerNorm(lowercase , eps=lowercase)
a__: Dict = nn.Dropout(lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase=None , lowercase=None , ) -> List[str]:
'''simple docstring'''
a__: Optional[int] = self.layer_norm(lowercase)
a__: str = self.attention(
lowercase , encoder_hidden_states=lowercase , attention_mask=attention_mask.squeeze(1) , )
a__: Tuple = hidden_states + self.dropout(lowercase)
return layer_output
class __snake_case ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
super().__init__()
a__: str = TaDenseGatedActDense(d_model=lowercase , d_ff=lowercase , dropout_rate=lowercase)
a__: List[str] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase)
a__: str = TaLayerNorm(lowercase , eps=lowercase)
a__: List[str] = nn.Dropout(lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase=None) -> Dict:
'''simple docstring'''
a__: int = self.layer_norm(lowercase)
if conditioning_emb is not None:
a__: Union[str, Any] = self.film(lowercase , lowercase)
a__: Dict = self.DenseReluDense(lowercase)
a__: Dict = hidden_states + self.dropout(lowercase)
return hidden_states
class __snake_case ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
super().__init__()
a__: str = nn.Linear(lowercase , lowercase , bias=lowercase)
a__: Tuple = nn.Linear(lowercase , lowercase , bias=lowercase)
a__: str = nn.Linear(lowercase , lowercase , bias=lowercase)
a__: int = nn.Dropout(lowercase)
a__: int = NewGELUActivation()
def lowerCamelCase_ ( self , lowercase) -> int:
'''simple docstring'''
a__: Optional[int] = self.act(self.wi_a(lowercase))
a__: List[Any] = self.wi_a(lowercase)
a__: Dict = hidden_gelu * hidden_linear
a__: Optional[Any] = self.dropout(lowercase)
a__: List[str] = self.wo(lowercase)
return hidden_states
class __snake_case ( nn.Module ):
def __init__( self , lowercase , lowercase=1e-6) -> str:
'''simple docstring'''
super().__init__()
a__: Any = nn.Parameter(torch.ones(lowercase))
a__: Any = eps
def lowerCamelCase_ ( self , lowercase) -> Union[str, Any]:
'''simple docstring'''
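        # T5-style layer norm: scale by the reciprocal root-mean-square over the last dimension; no mean subtraction and no bias.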
a__: Optional[int] = hidden_states.to(torch.floataa).pow(2).mean(-1 , keepdim=lowercase)
a__: Union[str, Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
a__: Union[str, Any] = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
class __snake_case ( nn.Module ):
def lowerCamelCase_ ( self , lowercase) -> torch.Tensor:
'''simple docstring'''
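        # GELU with the tanh approximation: 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3))).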
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(lowercase , 3.0))))
class __snake_case ( nn.Module ):
def __init__( self , lowercase , lowercase) -> Dict:
'''simple docstring'''
super().__init__()
a__: List[str] = nn.Linear(lowercase , out_features * 2 , bias=lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
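        # FiLM conditioning: project the conditioning embedding to a per-feature scale and shift, then apply x * (1 + scale) + shift.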
a__: Union[str, Any] = self.scale_bias(lowercase)
a__ , a__: Union[str, Any] = torch.chunk(lowercase , 2 , -1)
a__: Optional[int] = x * (1 + scale) + shift
return x
| 290 | """simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir('fixtures/test_sentencepiece.model')
lowercase__ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
lowercase__ = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = CamembertTokenizer
a__ = CamembertTokenizerFast
a__ = True
a__ = True
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a__: Tuple = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = '<pad>'
a__: List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: str = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>NOTUSED')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , '<mask>')
self.assertEqual(len(lowercase) , 10_04)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_05)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Optional[Any] = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
a__: List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
a__: Dict = 'I was born in 92000, and this is falsé.'
a__: Optional[int] = tokenizer.encode(lowercase)
a__: Any = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Optional[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
a__: Tuple = tokenizer.convert_ids_to_tokens(lowercase)
a__: Tuple = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__: Dict = self.get_tokenizer()
a__: str = self.get_rust_tokenizer()
a__: int = 'I was born in 92000, and this is falsé.'
a__: Optional[Any] = tokenizer.tokenize(lowercase)
a__: List[Any] = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: str = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Tuple = self.get_rust_tokenizer()
a__: Union[str, Any] = tokenizer.encode(lowercase)
a__: List[Any] = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Union[str, Any] = {'input_ids': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # Camembert is a French model, so we also use French texts.
a__: int = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=lowercase , )
| 290 | 1 |
"""simple docstring"""
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE = 1000000 ) ->int:
a__: int = limit + 1
a__: Optional[int] = [0] * limit
for first_term in range(1 , _SCREAMING_SNAKE_CASE ):
for n in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a__: List[Any] = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1 # so z > 0 and a > d, also 4d < a
a__: Any = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"{solution() = }")
| 290 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowercase__ = 'src/transformers'
lowercase__ = 'docs/source/en/tasks'
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
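    # Return the text between `start_prompt` and `end_prompt` in `filename`, together with its start/end line indices and the raw lines, so callers can splice in a replacement block.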
with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
a__: Union[str, Any] = f.readlines()
# Find the start prompt.
a__: Optional[int] = 0
while not lines[start_index].startswith(_SCREAMING_SNAKE_CASE ):
start_index += 1
start_index += 1
a__: Any = start_index
while not lines[end_index].startswith(_SCREAMING_SNAKE_CASE ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowercase__ = direct_transformers_import(TRANSFORMERS_PATH)
lowercase__ = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowercase__ = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def __a ( _SCREAMING_SNAKE_CASE ) ->int:
a__: int = TASK_GUIDE_TO_MODELS[task_guide]
a__: Dict = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_SCREAMING_SNAKE_CASE , set() )
a__: Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->Optional[int]:
a__ , a__ , a__ , a__: int = _find_text_in_file(
filename=os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
a__: Optional[Any] = get_model_list_for_task(_SCREAMING_SNAKE_CASE )
if current_list != new_list:
if overwrite:
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
' to fix this.' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowercase__ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 290 | """simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowercase__ = TypeVar('T')
lowercase__ = Union[List[T], Tuple[T, ...]]
lowercase__ = Union[T, List[T], Dict[str, T]]
lowercase__ = Union[str, bytes, os.PathLike]
| 290 | 1 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __snake_case ( __lowerCAmelCase ):
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Union[str, Any] = SMALL_MODEL_IDENTIFIER
a__: List[Any] = 'pt'
a__: Union[str, Any] = 'tf'
def lowerCamelCase_ ( self , lowercase) -> Optional[Any]:
'''simple docstring'''
a__: Optional[int] = AutoModel.from_pretrained(self.test_model)
model_pt.save_pretrained(lowercase)
def lowerCamelCase_ ( self , lowercase) -> Any:
'''simple docstring'''
a__: List[str] = TFAutoModel.from_pretrained(self.test_model , from_pt=lowercase)
model_tf.save_pretrained(lowercase)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Union[str, Any] = 'mock_framework'
# Framework provided - return whatever the user provides
a__: Optional[Any] = FeaturesManager.determine_framework(self.test_model , lowercase)
self.assertEqual(lowercase , lowercase)
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowercase)
a__: str = FeaturesManager.determine_framework(lowercase , lowercase)
self.assertEqual(lowercase , lowercase)
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowercase)
a__: Optional[int] = FeaturesManager.determine_framework(lowercase , lowercase)
self.assertEqual(lowercase , lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowercase)
a__: Optional[Any] = FeaturesManager.determine_framework(lowercase)
self.assertEqual(lowercase , self.framework_pt)
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowercase)
a__: List[Any] = FeaturesManager.determine_framework(lowercase)
self.assertEqual(lowercase , self.framework_tf)
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(lowercase):
a__: Tuple = FeaturesManager.determine_framework(lowercase)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Tuple = MagicMock(return_value=lowercase)
with patch('transformers.onnx.features.is_tf_available' , lowercase):
a__: Union[str, Any] = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(lowercase , self.framework_pt)
# PyTorch not in environment -> use TensorFlow
a__: Optional[int] = MagicMock(return_value=lowercase)
with patch('transformers.onnx.features.is_torch_available' , lowercase):
a__: Optional[int] = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(lowercase , self.framework_tf)
# Both in environment -> use PyTorch
a__: str = MagicMock(return_value=lowercase)
a__: int = MagicMock(return_value=lowercase)
with patch('transformers.onnx.features.is_tf_available' , lowercase), patch(
'transformers.onnx.features.is_torch_available' , lowercase):
a__: int = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(lowercase , self.framework_pt)
# Both not in environment -> raise error
a__: List[Any] = MagicMock(return_value=lowercase)
a__: Union[str, Any] = MagicMock(return_value=lowercase)
with patch('transformers.onnx.features.is_tf_available' , lowercase), patch(
'transformers.onnx.features.is_torch_available' , lowercase):
with self.assertRaises(lowercase):
a__: List[str] = FeaturesManager.determine_framework(self.test_model)
| 290 | """simple docstring"""
from math import pi, sqrt, tan
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
a__: List[Any] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_SCREAMING_SNAKE_CASE , 2 ) * torus_radius * tube_radius
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
a__: int = (sidea + sidea + sidea) / 2
a__: Tuple = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 290 | 1 |
"""simple docstring"""
class __snake_case :
def __init__( self , lowercase) -> Any:
'''simple docstring'''
a__: str = arr.split(',')
def lowerCamelCase_ ( self) -> Tuple:
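        # Kadane-style dynamic programming: sum_value[i] is the best subarray sum ending at i, rear[i] the best sum seen so far.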
'''simple docstring'''
a__: Optional[int] = [int(self.array[0])] * len(self.array)
a__: List[Any] = [int(self.array[0])] * len(self.array)
for i in range(1 , len(self.array)):
a__: List[Any] = max(
int(self.array[i]) + sum_value[i - 1] , int(self.array[i]))
a__: int = max(sum_value[i] , rear[i - 1])
return rear[len(self.array) - 1]
if __name__ == "__main__":
lowercase__ = input('please input some numbers:')
lowercase__ = SubArray(whole_array)
lowercase__ = array.solve_sub_array()
    print(('the result is:', re))
| 290 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase__ = random.Random()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
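    # Build a nested list of shape (shape[0], shape[1]) of pseudo-random floats scaled by `scale`, falling back to the module-level RNG when none is given.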
if rng is None:
a__: Any = global_rng
a__: int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __snake_case ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=4_00 , lowercase=20_00 , lowercase=1 , lowercase=0.0 , lowercase=1_60_00 , lowercase=True , lowercase=True , ) -> Union[str, Any]:
'''simple docstring'''
a__: Tuple = parent
a__: Optional[int] = batch_size
a__: Optional[Any] = min_seq_length
a__: Optional[int] = max_seq_length
a__: Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__: Dict = feature_size
a__: Any = padding_value
a__: Optional[Any] = sampling_rate
a__: Optional[Any] = return_attention_mask
a__: str = do_normalize
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase_ ( self , lowercase=False , lowercase=False) -> Tuple:
'''simple docstring'''
def _flatten(lowercase):
return list(itertools.chain(*lowercase))
if equal_length:
a__: Dict = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
a__: List[Any] = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
a__: str = [np.asarray(lowercase) for x in speech_inputs]
return speech_inputs
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = WavaVecaFeatureExtractor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[int] = WavaVecaFeatureExtractionTester(self)
def lowerCamelCase_ ( self , lowercase) -> List[Any]:
'''simple docstring'''
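        # Zero-mean / unit-variance check applied to normalized feature sequences in the tests below.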
self.assertTrue(np.all(np.mean(lowercase , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0) - 1) < 1e-3))
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
a__: Optional[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: List[str] = [np.asarray(lowercase) for speech_input in speech_inputs]
# Test not batched input
a__: Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='np').input_values
a__: Dict = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test batched
a__: Dict = feat_extract(lowercase , return_tensors='np').input_values
a__: int = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test 2-D numpy arrays are batched.
a__: int = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
a__: Union[str, Any] = np.asarray(lowercase)
a__: int = feat_extract(lowercase , return_tensors='np').input_values
a__: Any = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: List[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Optional[int] = ['longest', 'max_length', 'do_not_pad']
a__: List[Any] = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: Dict = feat_extract(lowercase , padding=lowercase , max_length=lowercase , return_tensors='np')
a__: Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self.assertTrue(input_values[0][8_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self.assertTrue(input_values[0][10_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Optional[int] = range(8_00 , 14_00 , 2_00)
a__: List[str] = [floats_list((1, x))[0] for x in lengths]
a__: Tuple = ['longest', 'max_length', 'do_not_pad']
a__: Dict = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: int = feat_extract(lowercase , max_length=lowercase , padding=lowercase)
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Any = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Dict = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='max_length' , return_tensors='np')
a__: int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: int = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: str = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='longest' , return_tensors='np')
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00))
a__: Dict = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Tuple = feat_extract(
lowercase , truncation=lowercase , max_length=20_00 , padding='longest' , return_tensors='np')
a__: str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00))
@require_torch
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
import torch
a__: Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Tuple = np.random.rand(1_00).astype(np.floataa)
a__: Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__: Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
a__: Optional[Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
@slow
@require_torch
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
a__: str = WavaVecaConfig.from_pretrained(lowercase)
a__: str = WavaVecaFeatureExtractor.from_pretrained(lowercase)
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer')
| 290 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class __snake_case ( __lowerCAmelCase ):
a__ = """xmod"""
def __init__( self , lowercase=3_05_22 , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=None , lowercase=False , lowercase=2 , lowercase=False , lowercase=True , lowercase=True , lowercase=("en_XX",) , lowercase=None , **lowercase , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
a__: str = vocab_size
a__: str = hidden_size
a__: List[str] = num_hidden_layers
a__: Union[str, Any] = num_attention_heads
a__: Optional[Any] = hidden_act
a__: int = intermediate_size
a__: int = hidden_dropout_prob
a__: List[str] = attention_probs_dropout_prob
a__: Optional[int] = max_position_embeddings
a__: Optional[int] = type_vocab_size
a__: int = initializer_range
a__: Tuple = layer_norm_eps
a__: Optional[int] = position_embedding_type
a__: Tuple = use_cache
a__: Dict = classifier_dropout
a__: List[Any] = pre_norm
a__: Tuple = adapter_reduction_factor
a__: Dict = adapter_layer_norm
a__: List[str] = adapter_reuse_layer_norm
a__: Tuple = ln_before_adapter
a__: int = list(lowercase)
a__: List[str] = default_language
class __snake_case ( __lowerCAmelCase ):
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
a__: List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__: Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __snake_case ( __lowerCAmelCase ):
a__ = """decision_transformer"""
a__ = ["""past_key_values"""]
a__ = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase=17 , lowercase=4 , lowercase=1_28 , lowercase=40_96 , lowercase=True , lowercase=1 , lowercase=10_24 , lowercase=3 , lowercase=1 , lowercase=None , lowercase="relu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=5_02_56 , lowercase=5_02_56 , lowercase=False , lowercase=False , **lowercase , ) -> Tuple:
'''simple docstring'''
a__: List[str] = state_dim
a__: int = act_dim
a__: List[Any] = hidden_size
a__: List[str] = max_ep_len
a__: List[Any] = action_tanh
a__: Optional[Any] = vocab_size
a__: Tuple = n_positions
a__: Dict = n_layer
a__: Optional[int] = n_head
a__: Optional[int] = n_inner
a__: Any = activation_function
a__: Union[str, Any] = resid_pdrop
a__: Any = embd_pdrop
a__: Any = attn_pdrop
a__: List[Any] = layer_norm_epsilon
a__: Optional[Any] = initializer_range
a__: Any = scale_attn_weights
a__: Dict = use_cache
a__: Optional[int] = scale_attn_by_inverse_layer_idx
a__: List[str] = reorder_and_upcast_attn
a__: Any = bos_token_id
a__: int = eos_token_id
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase)
| 290 | 1 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowercase__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
lowercase__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
lowercase__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string'),
'references': datasets.Value('string'),
}) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
def lowerCamelCase_ ( self , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
a__: Any = 0.0
for i, j in zip(lowercase , lowercase):
n_correct += 1.0 if math_equivalence.is_equiv(lowercase , lowercase) else 0.0
a__: List[str] = n_correct / len(lowercase)
return {
"accuracy": accuracy,
}
| 290 | """simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
while a != 0:
a__ , a__: List[str] = b % a, a
return b
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
if gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) != 1:
a__: Dict = F'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: Union[str, Any] = 1, 0, a
a__ , a__ , a__: Any = 0, 1, m
while va != 0:
a__: int = ua // va
a__ , a__ , a__ , a__ , a__ , a__: Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
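# A minimal usage sketch, assuming hypothetical names `gcd` and `mod_inverse` for the
# two helpers above: the extended-Euclidean result satisfies
# (a * mod_inverse(a, m)) % m == 1 whenever gcd(a, m) == 1; for example
# mod_inverse(3, 10) == 7, since 3 * 7 = 21 ≡ 1 (mod 10).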
| 290 | 1 |