| code (string, lengths 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import math
def prime_sieve(n):
    """Return a list of all primes strictly below n (n >= 3), via an odd-only sieve."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit=999_966_663_333):
    """For each pair of consecutive primes p < q with p**2 <= limit, sum the numbers
    up to limit lying strictly between p**2 and q**2 that are divisible by exactly
    one of p and q."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
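# A hedged sanity check (an editor-added sketch, not part of the original dataset
# row): prime_sieve(n) should return every prime strictly below n, for n >= 3.
if __name__ == "__main__":
    assert prime_sieve(10) == [2, 3, 5, 7]
    assert prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]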
| 511 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that digit n can be placed at (row, column) without a conflict."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, or None if full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Fill the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
lowerCAmelCase : Any = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
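# A small hypothetical helper (an editor-added sketch, not part of the original
# dataset row) showing how a solved grid can be validated with is_safe above:
# temporarily clear each cell and check that its digit is still admissible there.
def is_solved(grid: Matrix) -> bool:
    for row in range(9):
        for column in range(9):
            digit = grid[row][column]
            if digit == 0:
                return False
            grid[row][column] = 0
            safe = is_safe(grid, row, column, digit)
            grid[row][column] = digit
            if not safe:
                return False
    return True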
| 511 | 1 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    """Wrapper around the Google Research reimplementation of ROUGE."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                } ), codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"], reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ], )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
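# A hedged standalone usage sketch (editor-added, not part of the original dataset
# row), showing the rouge_score call that _compute above delegates to:
if __name__ == "__main__":
    demo_scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
    print(demo_scorer.score("hello there", "hello there"))  # identical texts -> fmeasure 1.0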
| 718 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub so the type hints below resolve when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ])
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ], outputs, )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
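# A hedged usage sketch (editor-added, not part of the original dataset row) of the
# public API the tests above exercise; the model id and output keys come from
# test_large_model_pt:
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"] is a PIL.Image.Image and result["predicted_depth"] a torch.Tensor.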
| 521 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", )
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png")
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy", ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy", ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 99 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
                den += 1
            num += 1
        den = 10
    return solutions
def solution(digit_len: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
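# A hedged worked example (editor-added, not part of the original dataset row):
# 49/98 is the classic digit-cancelling fraction, since "cancelling" the shared 9
# leaves 4/8 == 1/2; the four two-digit cases multiply to 1/100, hence solution() == 100.
if __name__ == "__main__":
    assert is_digit_cancelling(49, 98)
    assert not is_digit_cancelling(30, 50)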
| 238 | 0 |
'''simple docstring'''
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Count inversions via merge sort in O(n log n); returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """Merge two sorted lists p and q, counting pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
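# A hedged cross-check sketch (editor-added, not part of the original dataset row):
# the O(n^2) brute force and the O(n log n) recursive count must agree on random input.
if __name__ == "__main__":
    import random

    sample = [random.randrange(100) for _ in range(50)]
    assert count_inversions_bf(sample) == count_inversions_recursive(sample)[1]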
| 320 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save the pruned model, replacing any checkpoint already in dirpath."""
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a categorical distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Log a 2D tensor, one layer per line."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Tuple = model.config.num_hidden_layers, model.config.num_attention_heads
SCREAMING_SNAKE_CASE__ :List[str] = torch.zeros(UpperCAmelCase__ , UpperCAmelCase__ ).to(args.device )
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.zeros(UpperCAmelCase__ , UpperCAmelCase__ ).to(args.device )
if head_mask is None:
SCREAMING_SNAKE_CASE__ :Any = torch.ones(UpperCAmelCase__ , UpperCAmelCase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCAmelCase__ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
SCREAMING_SNAKE_CASE__ :Optional[Any] = None
SCREAMING_SNAKE_CASE__ :Dict = 0.0
SCREAMING_SNAKE_CASE__ :Any = 0.0
for step, inputs in enumerate(tqdm(UpperCAmelCase__ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
SCREAMING_SNAKE_CASE__ :Union[str, Any] = tuple(t.to(args.device ) for t in inputs )
((SCREAMING_SNAKE_CASE__) , ) :Dict = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
SCREAMING_SNAKE_CASE__ :Optional[Any] = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Union[str, Any] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE__ :Optional[int] = entropy(attn.detach() , UpperCAmelCase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCAmelCase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
SCREAMING_SNAKE_CASE__ :List[str] = 2
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.pow(torch.pow(UpperCAmelCase__ , UpperCAmelCase__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
SCREAMING_SNAKE_CASE__ :Dict = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(UpperCAmelCase__ )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(UpperCAmelCase__ )
logger.info('Head ranked by importance scores' )
SCREAMING_SNAKE_CASE__ :str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
SCREAMING_SNAKE_CASE__ :Any = torch.arange(
head_importance.numel() , device=args.device )
SCREAMING_SNAKE_CASE__ :Optional[Any] = head_ranks.view_as(UpperCAmelCase__ )
print_ad_tensor(UpperCAmelCase__ )
return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[int] = compute_heads_importance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :List[Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , UpperCAmelCase__ , original_score * args.masking_threshold )
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.ones_like(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
SCREAMING_SNAKE_CASE__ :str = original_score
while current_score >= original_score * args.masking_threshold:
SCREAMING_SNAKE_CASE__ :Any = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
SCREAMING_SNAKE_CASE__ :str = float('Inf' )
SCREAMING_SNAKE_CASE__ :str = head_importance.view(-1 ).sort()[1]
if len(UpperCAmelCase__ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
SCREAMING_SNAKE_CASE__ :Optional[int] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
SCREAMING_SNAKE_CASE__ :List[Any] = new_head_mask.view(-1 )
SCREAMING_SNAKE_CASE__ :str = 0.0
SCREAMING_SNAKE_CASE__ :Any = new_head_mask.view_as(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :str = new_head_mask.clone().detach()
print_ad_tensor(UpperCAmelCase__ )
# Compute metric and head importance again
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :int = compute_heads_importance(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :Any = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , UpperCAmelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info('Final head mask' )
print_ad_tensor(UpperCAmelCase__ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = datetime.now()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :str = compute_heads_importance(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ , compute_importance=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :Any = 1 / loss
SCREAMING_SNAKE_CASE__ :List[Any] = datetime.now() - before_time
SCREAMING_SNAKE_CASE__ :Union[str, Any] = sum(p.numel() for p in model.parameters() )
SCREAMING_SNAKE_CASE__ :Dict = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCAmelCase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE__ :Any = [
v,
]
assert sum(len(UpperCAmelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :List[Any] = sum(p.numel() for p in model.parameters() )
SCREAMING_SNAKE_CASE__ :Optional[int] = datetime.now()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[Any] = compute_heads_importance(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ , compute_importance=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , actually_pruned=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE__ :List[Any] = 1 / loss
SCREAMING_SNAKE_CASE__ :Optional[int] = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , UpperCAmelCase__ , UpperCAmelCase__ , pruned_num_params / original_num_params * 1_0_0 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , UpperCAmelCase__ , UpperCAmelCase__ )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 )
save_model(UpperCAmelCase__ , args.output_dir )
def main():
'''simple docstring'''
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=UpperCAmelCase__ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=UpperCAmelCase__ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=UpperCAmelCase__ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=UpperCAmelCase__ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=UpperCAmelCase__ , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=UpperCAmelCase__ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_2_8 , type=UpperCAmelCase__ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=UpperCAmelCase__ , help='Batch size.' )
parser.add_argument('--seed' , type=UpperCAmelCase__ , default=4_2 )
parser.add_argument('--local_rank' , type=UpperCAmelCase__ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=UpperCAmelCase__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=UpperCAmelCase__ , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCAmelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
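# A hedged sanity check (editor-added, not part of the original dataset row) for the
# entropy helper above: a uniform distribution over 4 outcomes has entropy
# ln(4) ~= 1.3863, e.g.:
#   torch.isclose(entropy(torch.full((4,), 0.25)), torch.log(torch.tensor(4.0)))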
| 320 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    """Multi-node distributed-training tests run on SageMaker."""

    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 11 |
from __future__ import annotations
class Node:
    """A binary tree node holding an integer."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree):  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree):
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree):
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main():  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
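# A hedged extra check (editor-added, not part of the original dataset row): a
# left-degenerate three-node chain has depth 3 and is not a full binary tree.
if __name__ == "__main__":
    chain = Node(1)
    chain.left = Node(2)
    chain.left.left = Node(3)
    assert depth_of_tree(chain) == 3
    assert not is_full_binary_tree(chain)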
| 605 | 0 |
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff 2**p - 1 is prime, for a prime exponent p (Lucas-Lehmer test)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
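# A hedged usage sketch (editor-added, not part of the original dataset row): for
# prime exponents p, lucas_lehmer_test(p) is True exactly when 2**p - 1 is prime.
if __name__ == "__main__":
    checked = [p for p in (3, 5, 7, 11, 13, 17, 19, 31) if lucas_lehmer_test(p)]
    assert checked == [3, 5, 7, 13, 17, 19, 31]  # 2**11 - 1 == 2047 == 23 * 89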
| 661 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments extended with seq2seq-specific knobs."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear", metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"}, )
| 661 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an EfficientNet model."""

    model_type = "efficientnet"
    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ) -> None:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 108 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 520 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image) | 665 |
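The slow test above exercises the public two-stage Kandinsky 2.2 flow. As a reference, here is a minimal hedged usage sketch of that flow outside the test harness; the checkpoint ids are the ones used in the test, while the prompt and strength value are illustrative only.

# Minimal sketch (assumes a CUDA device and the public kandinsky-community checkpoints).
import torch
from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyV22Img2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
# The prior turns text into image embeddings; the decoder does the img2img step.
image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k", negative_prompt="").to_tuple()
image = decoder(
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    strength=0.3,  # illustrative: how far to move away from init_image
).images[0]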
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector by Gaussian elimination with partial
    pivoting; entries are rounded to 10 decimal places on the way out."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    # build the augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute value in this column
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        # eliminate the entries below the pivot
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Given y_list = u(1), u(2), ..., u(n), return the unique polynomial of
    degree n - 1 passing through those points (the optimum polynomial OP(n))."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials
    OP(1, n) through OP(order, n) for the given generating function."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(F'''{solution() = }''') | 665 | 1 |
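As a sanity check for the interpolation code above, the cubic sequence from the Project Euler 101 statement can be reproduced directly; the expected values (1, 15 and 58, summing to 74) come from the problem statement itself.

# Sanity check with u(n) = n**3, the example from the Project Euler statement.
def cube(n: int) -> int:
    return n**3

op1 = interpolate([cube(1)])            # the constant polynomial 1
op2 = interpolate([cube(1), cube(2)])   # 7n - 6
assert op1(2) == 1                      # first incorrect term of OP(1, n)
assert op2(3) == 15                     # first incorrect term of OP(2, n)
assert solution(cube, 3) == 74          # 1 + 15 + 58, per the statement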
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    """Output class for the DeepFloyd IF pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_img2img import IFImg2ImgPipeline
from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
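For context, a minimal hedged sketch of the first exported pipeline; the DeepFloyd checkpoint id is real but gated behind a license acceptance, and the prompt is illustrative.

# Minimal sketch (assumes access to the gated DeepFloyd weights and a GPU).
import torch
from diffusers import IFPipeline

pipe = IFPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
prompt_embeds, negative_embeds = pipe.encode_prompt("a photo of a red panda")
image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds).images[0]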
| 479 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 479 | 1 |
def count_divisors(n):
    """Count the divisors of n via its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Return the first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
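Two quick checks against the Project Euler 12 statement, where 28 (the seventh triangle number) is the first with more than five divisors:

# Sanity checks for count_divisors above.
assert count_divisors(28) == 6  # 1, 2, 4, 7, 14, 28
assert count_divisors(10) == 4  # 1, 2, 5, 10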
| 702 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        # compute the expected (height, width) after aspect-ratio preserving resize
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 108 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case = logging.get_logger(__name__)
snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case = {'''facebook/blenderbot-3B''': 1_2_8}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
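A short hedged sketch of the fast tokenizer in isolation; the checkpoint id comes from the maps above, and the sample utterance is illustrative.

# Minimal sketch: round-trip a single utterance through the fast tokenizer.
from transformers import BlenderbotTokenizerFast

tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
ids = tokenizer(" Hello, how are you?").input_ids  # Blenderbot expects a leading space
print(tokenizer.decode(ids))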
| 103 |
'''simple docstring'''
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
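A worked example for the conversion functions above, using the classic textbook expression (the functions also print their trace tables as a side effect):

# Worked example: a+b*(c^d-e) in postfix and prefix notation.
assert infix_2_postfix("a+b*(c^d-e)") == "abcd^e-*+"
assert infix_2_prefix("a+b*(c^d-e)") == "+a*b-^cde"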
| 24 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
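A minimal hedged usage sketch: the config pairs with the Megatron-BERT model classes in transformers; the overridden layer count is illustrative only.

# Minimal sketch: build a randomly initialised Megatron-BERT from this config.
from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig(num_hidden_layers=2)  # illustrative override for a tiny model
model = MegatronBertModel(config)
print(model.config.hidden_size)  # 1024, the default above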
| 708 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name for the given date (Doomsday algorithm)."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # a year is NOT a leap year if it is not divisible by 4, or if it is a
    # century year not divisible by 400
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
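Two worked examples for the function above; the second exercises the year-divisible-by-400 leap rule.

# Worked examples for the Doomsday algorithm.
assert get_week_day(2020, 10, 24) == "Saturday"
assert get_week_day(2000, 1, 1) == "Saturday"  # 2000 is a leap year (divisible by 400)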
| 613 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 55 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 55 | 1 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        # name kept as `ta_base_tokenizer` to match the references in the tests below
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def lowerCamelCase__ ( self : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Any=False , lowerCAmelCase : Optional[Any]=20 , lowerCAmelCase : Any=5 ) -> Tuple[str, list]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =[]
for i in range(len(lowerCAmelCase ) ):
try:
SCREAMING_SNAKE_CASE_: List[str] =tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE_: Dict =list(filter(lambda lowerCAmelCase : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: List[Any] =list(filter(lambda lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCAmelCase ) , lowerCAmelCase ) )
if max_length is not None and len(lowerCAmelCase ) > max_length:
SCREAMING_SNAKE_CASE_: Union[str, Any] =toks[:max_length]
if min_length is not None and len(lowerCAmelCase ) < min_length and len(lowerCAmelCase ) > 0:
while len(lowerCAmelCase ) < min_length:
SCREAMING_SNAKE_CASE_: Dict =toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE_: Tuple =[t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE_: Dict =tokenizer.decode(lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
if " " not in output_txt and len(lowerCAmelCase ) > 1:
SCREAMING_SNAKE_CASE_: str =(
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE_: Tuple =""" """ + output_txt
SCREAMING_SNAKE_CASE_: Dict =tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_: int =["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
SCREAMING_SNAKE_CASE_: str =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE_: List[str] =tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_: Any =list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_: Dict =list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_: List[Any] =["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
SCREAMING_SNAKE_CASE_: List[Any] =tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors=lowerCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , lowerCAmelCase )
self.assertIn("""attention_mask""" , lowerCAmelCase )
self.assertNotIn("""decoder_input_ids""" , lowerCAmelCase )
self.assertNotIn("""decoder_attention_mask""" , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_: str =[
"""Summary of the text.""",
"""Another summary.""",
]
SCREAMING_SNAKE_CASE_: Any =tokenizer(
text_target=lowerCAmelCase , max_length=32 , padding="""max_length""" , truncation=lowerCAmelCase , return_tensors=lowerCAmelCase )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_: Optional[int] =["""A long paragraph for summarization. </s>"""]
SCREAMING_SNAKE_CASE_: List[str] =["""Summary of the text. </s>"""]
# fmt: off
SCREAMING_SNAKE_CASE_: Tuple =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE_: List[str] =[86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE_: List[Any] =tokenizer(lowerCAmelCase , text_target=lowerCAmelCase )
self.assertEqual(lowerCAmelCase , batch["""input_ids"""][0] )
self.assertEqual(lowerCAmelCase , batch["""labels"""][0] )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE_: Tuple =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_: Any =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: str =""" He is very happy, UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE_: Any =tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =tokenizer.__class__.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =after_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
shutil.rmtree(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_: Any =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: Dict =""" He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
SCREAMING_SNAKE_CASE_: str =tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
SCREAMING_SNAKE_CASE_: str =tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer.__class__.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =after_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE_: str =tokenizer.__class__.from_pretrained(lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
SCREAMING_SNAKE_CASE_: Tuple =json.load(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
SCREAMING_SNAKE_CASE_: List[Any] =json.load(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =[f'''<extra_id_{i}>''' for i in range(125 )]
SCREAMING_SNAKE_CASE_: Optional[Any] =added_tokens_extra_ids + [
"""an_additional_special_token"""
]
SCREAMING_SNAKE_CASE_: List[str] =added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(lowerCAmelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase , lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase , lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer_class.from_pretrained(
lowerCAmelCase , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_: Optional[int] =added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=lowerCAmelCase )]
SCREAMING_SNAKE_CASE_: List[Any] =tokenizer_class.from_pretrained(
lowerCAmelCase , additional_special_tokens=lowerCAmelCase , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowerCamelCase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =tokenizer_class.from_pretrained(lowerCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_tokenizers(fast=lowerCAmelCase , do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_: List[Any] =["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
SCREAMING_SNAKE_CASE_: str =tokenizer.convert_tokens_to_string(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_: str =[
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
SCREAMING_SNAKE_CASE_: List[Any] =0
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer.convert_ids_to_tokens(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
for attr in attributes_list:
setattr(lowerCAmelCase , attr + """_id""" , lowerCAmelCase )
self.assertEqual(getattr(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(getattr(lowerCAmelCase , attr + """_id""" ) , lowerCAmelCase )
setattr(lowerCAmelCase , attr + """_id""" , lowerCAmelCase )
self.assertEqual(getattr(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(getattr(lowerCAmelCase , attr + """_id""" ) , lowerCAmelCase )
setattr(lowerCAmelCase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(lowerCAmelCase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(lowerCAmelCase , """additional_special_tokens_ids""" ) , [] )
setattr(lowerCAmelCase , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCAmelCase , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(lowerCAmelCase , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
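The hard-coded id lists in the tests above follow ByT5's byte-level scheme: each id is simply the UTF-8 byte value shifted by 3, since ids 0-2 are reserved for pad, </s> and <unk>. A tiny sketch of that mapping:

# Byte-to-id mapping used throughout the expectations above: id = utf8_byte + 3.
text = "hi"
ids = [b + 3 for b in text.encode("utf-8")] + [1]  # 1 is </s>
assert ids == [107, 108, 1]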
| 36 |
"""simple docstring"""
def is_palindrome(n) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Count the Lychrel candidates below limit (Project Euler 55): numbers
    that do not reach a palindrome within 50 reverse-and-add iterations."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36 | 1 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Project Euler 174: count tile totals t <= t_limit that can form
    between 1 and n_limit distinct hollow square laminae."""
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f'''{solution() = }''')
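The smallest case from the problem statement is a 3x3 square with a 1x1 hole, using 3**2 - 1**2 = 8 tiles. A tiny hedged brute-force cross-check (laminae_for is an illustrative helper, not part of the solution above):

# Brute-force cross-check of the lamina count for small tile totals.
def laminae_for(tiles: int, max_outer: int = 100) -> int:
    ways = 0
    for outer in range(3, max_outer):
        for hole in range(outer - 2, 0, -2):  # same parity as outer, at least 1 tile thick
            if outer * outer - hole * hole == tiles:
                ways += 1
    return ways

assert laminae_for(8) == 1   # the 3x3 outer / 1x1 hole example
assert laminae_for(32) == 2  # per the Project Euler statement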
| 265 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
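# Note: this is the standard transformers lazy-module pattern -- submodules listed in
# _import_structure are only imported on first attribute access, keeping package import
# cheap, while the TYPE_CHECKING branch gives type checkers the real imports.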
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
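# Minimal usage sketch (the model id is illustrative, not pinned by this file):
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_pil_image, num_inference_steps=100).images[0]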
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
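# Minimal usage sketch: `TimeSeriesTransformerConfig(prediction_length=24)` yields a
# complete config -- context_length falls back to prediction_length when unset, and
# feature_size is derived from input_size, the lag indices and the static/dynamic features.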
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
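# Example invocation (script name, paths and model id are illustrative):
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged_pipeline --alpha 0.75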
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
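# Worked example (values illustrative): 2 mol of an ideal gas at 300 K in a 1 m^3
# vessel exerts P = nRT/V = 2 * 8.314462 * 300 / 1 ≈ 4988.68 Pa, i.e.
#   pressure_of_gas_system(2, 300, 1) ≈ 4988.68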
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase_ : Optional[int] = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase_,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
"""simple docstring"""
import argparse
import os
import re
lowerCamelCase_ = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCamelCase_ = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
lowerCamelCase_ = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
lowerCamelCase_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only) | 498 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
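# Reading of the check above: mtf_score is the negative summed token log-loss
# (mean cross-entropy scaled back by the target length), compared against a
# reference score with a loose 1e-4 tolerance.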
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
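# Usage sketch: the `datasets` library selects this formatter when you call
# `dataset.set_format("torch")`; rows, columns and batches are then returned as
# torch tensors, with same-shaped lists consolidated via torch.stack above.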
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with"
                f" MaskFormer. Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
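# Minimal usage sketch: the default construction is already self-consistent -- it
# falls back to a Swin backbone and a DETR decoder, so e.g.
#   config = MaskFormerConfig()
#   assert config.decoder_config.model_type == "detr"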
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
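    # Note on make_divisible above: MobileNet-family models keep channel counts
    # divisible by 8, which maps cleanly onto common accelerator tiling; with
    # width_multiplier=0.25 the last hidden size is make_divisible(128, 8) = 128.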
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig ( PretrainedConfig ):
model_type = '''mctct'''
def __init__( self , vocab_size=8065 , hidden_size=1536 , num_hidden_layers=36 , intermediate_size=6144 , num_attention_heads=4 , attention_head_dim=384 , max_position_embeddings=920 , layer_norm_eps=1E-5 , layerdrop=0.3 , hidden_act="relu" , initializer_range=0.02 , hidden_dropout_prob=0.3 , attention_probs_dropout_prob=0.3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , conv_glu_dim=1 , conv_dropout=0.3 , num_conv_layers=1 , conv_kernel=(7,) , conv_stride=(3,) , input_feat_per_channel=80 , input_channels=1 , conv_channels=None , ctc_loss_reduction="sum" , ctc_zero_infinity=False , **kwargs , ):
super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.num_attention_heads = num_attention_heads
self.attention_head_dim = attention_head_dim
self.max_position_embeddings = max_position_embeddings
self.layer_norm_eps = layer_norm_eps
self.layerdrop = layerdrop
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.conv_glu_dim = conv_glu_dim
self.conv_dropout = conv_dropout
self.num_conv_layers = num_conv_layers
self.input_feat_per_channel = input_feat_per_channel
self.input_channels = input_channels
self.conv_channels = conv_channels
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
# prevents config testing fail with exporting to json
self.conv_kernel = list(conv_kernel )
self.conv_stride = list(conv_stride )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
f"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`." )
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid( _outputs ):
return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax( _outputs ):
maxes = np.max(_outputs , axis=-1 , keepdims=True )
shifted_exp = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
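# Quick numeric sanity checks for the helpers above (hand-computed values):
# sigmoid(np.array([0.0])) -> array([0.5]); softmax(np.array([[0.0, 0.0]])) -> array([[0.5, 0.5]])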
class ClassificationFunction ( ExplicitEnum ):
'''simple docstring'''
SIGMOID = '''sigmoid'''
SOFTMAX = '''softmax'''
NONE = '''none'''
@add_end_docstrings(
PIPELINE_INIT_ARGS , r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class TextClassificationPipeline ( Pipeline ):
'''simple docstring'''
return_all_scores = False
function_to_apply = ClassificationFunction.NONE
def __init__( self , **kwargs ):
"""simple docstring"""
super().__init__(**kwargs )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _sanitize_parameters( self , return_all_scores=None , function_to_apply=None , top_k="" , **tokenizer_kwargs ):
"""simple docstring"""
preprocess_params = tokenizer_kwargs
postprocess_params = {}
if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None:
return_all_scores = self.model.config.return_all_scores
if isinstance(top_k , int ) or top_k is None:
postprocess_params["""top_k"""] = top_k
postprocess_params["""_legacy"""] = False
elif return_all_scores is not None:
warnings.warn(
"""`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"""
""" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , UserWarning , )
if return_all_scores:
postprocess_params["""top_k"""] = None
else:
postprocess_params["""top_k"""] = 1
if isinstance(function_to_apply , str ):
function_to_apply = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
postprocess_params["""function_to_apply"""] = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *args , **kwargs ):
"""simple docstring"""
result = super().__call__(*args , **kwargs )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_legacy = """top_k""" not in kwargs
if isinstance(args[0] , str ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def preprocess( self , inputs , **tokenizer_kwargs ) -> Dict[str, GenericTensor]:
"""simple docstring"""
return_tensors = self.framework
if isinstance(inputs , dict ):
return self.tokenizer(**inputs , return_tensors=return_tensors , **tokenizer_kwargs )
elif isinstance(inputs , list ) and len(inputs ) == 1 and isinstance(inputs[0] , list ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=return_tensors , **tokenizer_kwargs )
elif isinstance(inputs , list ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"""The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
""" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
return self.tokenizer(inputs , return_tensors=return_tensors , **tokenizer_kwargs )
def _forward( self , model_inputs ):
"""simple docstring"""
return self.model(**model_inputs )
def postprocess( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True ):
"""simple docstring"""
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
function_to_apply = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
function_to_apply = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None:
function_to_apply = self.model.config.function_to_apply
else:
function_to_apply = ClassificationFunction.NONE
outputs = model_outputs["""logits"""][0]
outputs = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
scores = sigmoid(outputs )
elif function_to_apply == ClassificationFunction.SOFTMAX:
scores = softmax(outputs )
elif function_to_apply == ClassificationFunction.NONE:
scores = outputs
else:
raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}" )
if top_k == 1 and _legacy:
return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
dict_scores = [
{"""label""": self.model.config.id2label[i], """score""": score.item()} for i, score in enumerate(scores )
]
if not _legacy:
dict_scores.sort(key=lambda x : x["score"] , reverse=True )
if top_k is not None:
dict_scores = dict_scores[:top_k]
return dict_scores
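# A hedged usage sketch of the pipeline defined above; the checkpoint name is an
# illustrative assumption, not something this file prescribes.
# from transformers import pipeline
# clf = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
# clf("This movie was great!")              # [{'label': 'POSITIVE', 'score': ...}]
# clf("This movie was great!", top_k=None)  # one score per label, sorted descending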
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
'''simple docstring'''
def __init__( self , device: str = "cpu" , clip_model: str = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
self.device = device
self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
self.image_mean = [0.48145466, 0.4578275, 0.40821073]
self.image_std = [0.26862954, 0.26130258, 0.27577711]
self.normalize = torchvision.transforms.Normalize(self.image_mean , self.image_std )
self.resize = torchvision.transforms.Resize(224 )
self.center_crop = torchvision.transforms.CenterCrop(224 )
def preprocess_img( self , images ):
"""simple docstring"""
images = self.resize(images )
images = self.center_crop(images )
images = self.normalize(images )
return images
def __call__( self , text=None , images=None , **kwargs ):
"""simple docstring"""
encoding = self.tokenizer(text=text , **kwargs )
encoding["""pixel_values"""] = self.preprocess_img(images )
encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class VQGAN_CLIP ( nn.Module ):
'''simple docstring'''
def __init__( self , iterations=10 , lr=0.01 , vqgan=None , vqgan_config=None , vqgan_checkpoint=None , clip=None , clip_preprocessor=None , device=None , log=False , save_vector=True , return_val="image" , quantize=True , save_intermediate=False , show_intermediate=False , make_grid=False , ) -> None:
"""simple docstring"""
super().__init__()
self.latent = None
self.device = device if device else get_device()
if vqgan:
self.vqgan = vqgan
else:
self.vqgan = load_vqgan(self.device , conf_path=vqgan_config , ckpt_path=vqgan_checkpoint )
self.vqgan.eval()
if clip:
self.clip = clip
else:
self.clip = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
self.iterations = iterations
self.lr = lr
self.log = log
self.make_grid = make_grid
self.return_val = return_val
self.quantize = quantize
self.latent_dim = self.vqgan.decoder.z_shape
def make_animation( self , input_path=None , output_path=None , total_duration=5 , extend_frames=True ):
"""simple docstring"""
images = []
if output_path is None:
output_path = """./animation.gif"""
if input_path is None:
input_path = self.save_path
paths = sorted(glob(input_path + """/*""" ) )
if not len(paths ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(paths ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
frame_duration = total_duration / len(paths )
durations = [frame_duration] * len(paths )
if extend_frames:
durations[0] = 1.5
durations[-1] = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(file_name ) )
imageio.mimsave(output_path , images , duration=durations )
print(F"gif saved to {output_path}" )
def _get_latent( self , path=None , img=None ):
"""simple docstring"""
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
img = preprocess(Image.open(path ) , target_image_size=256 ).to(self.device )
img = preprocess_vqgan(img )
z , *_ = self.vqgan.encode(img )
return z
def _add_vector( self , transform_vector ):
"""simple docstring"""
base_latent = self.latent.detach().requires_grad_()
trans_latent = base_latent + transform_vector
if self.quantize:
z_q , *_ = self.vqgan.quantize(trans_latent )
else:
z_q = trans_latent
return self.vqgan.decode(z_q )
def _get_clip_similarity( self , prompts , image , weights=None ):
"""simple docstring"""
clip_inputs = self.clip_preprocessor(text=prompts , images=image , return_tensors="""pt""" , padding=True )
clip_outputs = self.clip(**clip_inputs )
similarity_logits = clip_outputs.logits_per_image
if weights is not None:
similarity_logits = similarity_logits * weights
return similarity_logits.sum()
def _get_CLIP_loss( self , pos_prompts , neg_prompts , image ):
"""simple docstring"""
pos_logits = self._get_clip_similarity(pos_prompts["""prompts"""] , image , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
neg_logits = self._get_clip_similarity(neg_prompts["""prompts"""] , image , weights=neg_prompts["""weights"""] )
else:
neg_logits = torch.tensor([1] , device=self.device )
loss = -torch.log(pos_logits ) + torch.log(neg_logits )
return loss
def _optimize_CLIP( self , original_img , pos_prompts , neg_prompts ):
"""simple docstring"""
vector = torch.randn_like(self.latent , requires_grad=True , device=self.device )
optim = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
transformed_img = self._add_vector(vector )
processed_img = loop_post_process(transformed_img )
clip_loss = self._get_CLIP_loss(pos_prompts , neg_prompts , processed_img )
print("""CLIP loss""" , clip_loss )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=True )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _init_logging( self , positive_prompts , negative_prompts , image_path ):
"""simple docstring"""
wandb.init(reinit=True , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
image = Image.open(image_path )
image = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(image ) )
def process_prompts( self , prompts ):
"""simple docstring"""
if not prompts:
return []
processed_prompts = []
weights = []
if isinstance(prompts , str ):
prompts = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(prompt , (tuple, list) ):
processed_prompt = prompt[0]
weight = float(prompt[1] )
elif ":" in prompt:
processed_prompt , weight = prompt.split(""":""" )
weight = float(weight )
else:
processed_prompt = prompt
weight = 1.0
processed_prompts.append(processed_prompt )
weights.append(weight )
return {
"prompts": processed_prompts,
"weights": torch.tensor(weights , device=self.device ),
}
def generate( self , pos_prompts , neg_prompts=None , image_path=None , show_intermediate=True , save_intermediate=False , show_final=True , save_final=True , save_path=None , ):
"""simple docstring"""
if image_path:
self.latent = self._get_latent(image_path )
else:
self.latent = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(pos_prompts , neg_prompts , image_path )
assert pos_prompts, "You must provide at least one positive prompt."
pos_prompts = self.process_prompts(pos_prompts )
neg_prompts = self.process_prompts(neg_prompts )
if save_final and save_path is None:
save_path = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(save_path ):
os.makedirs(save_path )
else:
save_path = save_path + """_""" + get_timestamp()
os.makedirs(save_path )
self.save_path = save_path
original_img = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(original_img ) )
original_img = loop_post_process(original_img )
for iter, transformed_img in enumerate(self._optimize_CLIP(original_img , pos_prompts , neg_prompts ) ):
if show_intermediate:
show_pil(transformed_img )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({"""Image""": wandb.Image(transformed_img )} )
if show_final:
show_pil(transformed_img )
if save_final:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}_final.png" ) )
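# A hedged usage sketch for the class above; the prompt strings and paths are
# illustrative placeholders, and VQGAN weights must be resolvable by `load_vqgan`.
# editor = VQGAN_CLIP(iterations=20, lr=0.02, vqgan_config="./vqgan.yaml", vqgan_checkpoint="./vqgan.ckpt")
# editor.generate(pos_prompts="a smiling face:1.0 | studio lighting:0.5", neg_prompts="blurry:0.5",
#                 image_path="./input.png", show_intermediate=False, save_intermediate=True)
# editor.make_animation(output_path="./animation.gif")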
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ : Optional[Any] = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["ViTFeatureExtractor"]
lowerCamelCase__ : List[Any] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
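# Illustration of the lazy pattern above (a sketch, not part of the original file):
# importing the package is cheap, and heavy submodules load on first attribute access.
# from transformers.models import vit  # runs this __init__, builds _import_structure
# vit.ViTConfig                        # resolved eagerly under TYPE_CHECKING, lazily at runtime
# vit.ViTModel                         # triggers the torch-backed submodule import only here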
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
'''simple docstring'''
dataset_name: Optional[str] = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
dataset_config_name: Optional[str] = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
max_seq_length: int = field(
default=1024 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
overwrite_cache: bool = field(
default=False , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
pad_to_max_length: bool = field(
default=False , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
max_train_samples: Optional[int] = field(
default=None , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
max_eval_samples: Optional[int] = field(
default=None , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
max_predict_samples: Optional[int] = field(
default=None , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
train_file: Optional[str] = field(
default=None , metadata={"""help""": """A csv or a json file containing the training data."""} )
validation_file: Optional[str] = field(
default=None , metadata={"""help""": """A csv or a json file containing the validation data."""} )
test_file: Optional[str] = field(default=None , metadata={"""help""": """A csv or a json file containing the test data."""} )
def __post_init__( self ) -> None:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
train_extension = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
validation_extension = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
'''simple docstring'''
model_name_or_path: str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
config_name: Optional[str] = field(
default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
tokenizer_name: Optional[str] = field(
default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
cache_dir: Optional[str] = field(
default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
use_fast_tokenizer: bool = field(
default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
model_revision: str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
use_auth_token: bool = field(
default=False , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def main():
'''simple docstring'''
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
log_level = training_args.get_process_log_level()
logger.setLevel(log_level )
datasets.utils.logging.set_verbosity(log_level )
transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
data_files = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
train_extension = data_args.train_file.split('''.''' )[-1]
test_extension = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
data_files['''test'''] = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
raw_datasets = load_dataset('''csv''' , data_files=data_files , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
raw_datasets = load_dataset('''json''' , data_files=data_files , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
label_list = raw_datasets['''train'''].features['''label'''].names
num_labels = len(label_list )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
tokenizer = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
model = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
padding = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
model.config.label2id = {'''Refused''': 0, '''Entailed''': 1}
model.config.id2label = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(examples ):
# Tokenize the texts
def _convert_table_text_to_pandas(_table_text ):
_table_content = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
_table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
questions = examples['''statement''']
tables = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
result['''label'''] = examples['''label''']
return result
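# For intuition (values invented for illustration): a raw `table_text` string such as
# "year#city\n2008#beijing\n2012#london" becomes a DataFrame with columns
# ["year", "city"] and rows [["2008", "beijing"], ["2012", "london"]] before TAPEX tokenization.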
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
raw_datasets = raw_datasets.map(
preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
train_dataset = raw_datasets['''train''']
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
eval_dataset = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
predict_dataset = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(train_dataset ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p : EvalPrediction ):
preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
preds = np.argmax(preds , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.float64 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
else:
data_collator = None
# Initialize our Trainer
trainer = Trainer(
model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint )
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
)
metrics['''train_samples'''] = min(max_train_samples , len(train_dataset ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , metrics )
trainer.save_metrics('''train''' , metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
metrics = trainer.evaluate(eval_dataset=eval_dataset )
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
metrics['''eval_samples'''] = min(max_eval_samples , len(eval_dataset ) )
trainer.log_metrics('''eval''' , metrics )
trainer.save_metrics('''eval''' , metrics )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
predict_dataset = predict_dataset.remove_columns('''label''' )
predictions = trainer.predict(predict_dataset , metric_key_prefix='''predict''' ).predictions
predictions = np.argmax(predictions , axis=1 )
output_predict_file = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(output_predict_file , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(predictions ):
item = label_list[item]
writer.write(F"{index}\t{item}\n" )
kwargs = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs )
else:
trainer.create_model_card(**kwargs )
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
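# A hedged invocation sketch (flag values are illustrative, not prescribed by this script):
#   python run_tabfact.py --model_name_or_path microsoft/tapex-base \
#     --do_train --do_eval --output_dir ./tapex-tabfact \
#     --per_device_train_batch_size 8 --learning_rate 3e-5 --num_train_epochs 5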
import argparse
import os
import re
UpperCAmelCase_ = "src/diffusers"
# Pattern that looks at the indentation in a line.
UpperCAmelCase_ = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCAmelCase_ = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCAmelCase_ = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCAmelCase_ = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCAmelCase_ = re.compile(r"\[([^\]]+)\]")
def get_indent( line: str ) -> str:
"""simple docstring"""
search = _re_indent.search(line )
return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks( code , indent_level="" , start_prompt=None , end_prompt=None ):
"""simple docstring"""
index = 0
lines = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(start_prompt ):
index += 1
blocks = ['''\n'''.join(lines[:index] )]
else:
blocks = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
current_block = [lines[index]]
index += 1
while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(current_block ) )
if index < len(lines ) - 1:
current_block = [lines[index + 1]]
index += 1
else:
current_block = []
else:
blocks.append('''\n'''.join(current_block ) )
current_block = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(current_block ) > 0:
blocks.append('''\n'''.join(current_block ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lines ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore( key ):
"""simple docstring"""
def _inner(x ):
return key(x ).lower().replace('''_''' , '''''' )
return _inner
def sort_objects( objects , key=None ):
"""simple docstring"""
def noop(x ):
return x
if key is None:
key = noop
# Constants are all uppercase, they go first.
constants = [obj for obj in objects if key(obj ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
# Functions begin with a lowercase, they go last.
functions = [obj for obj in objects if not key(obj )[0].isupper()]
key1 = ignore_underscore(key )
return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import( import_statement ):
"""simple docstring"""
def _replace(match ):
imports = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
keys = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
keys = keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(keys )] ) + "]"
lines = import_statement.split('''\n''' )
if len(lines ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
idx = 2 if lines[1].strip() == '''[''' else 1
keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lines ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lines[1] = _re_bracket_content.sub(_replace , lines[1] )
else:
keys = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
keys = keys[:-1]
lines[1] = get_indent(lines[1] ) + ''', '''.join([F'''"{k}"''' for k in sort_objects(keys )] )
return "\n".join(lines )
else:
# Finally we have to deal with imports fitting on one line
import_statement = _re_bracket_content.sub(_replace , import_statement )
return import_statement
def sort_imports( file , check_only=True ):
"""simple docstring"""
with open(file , '''r''' ) as f:
code = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
main_blocks = split_code_in_indented_blocks(
code , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(main_blocks ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
block = main_blocks[block_idx]
block_lines = block.split('''\n''' )
# Get to the start of the imports.
line_idx = 0
while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
line_idx = len(block_lines )
else:
line_idx += 1
if line_idx >= len(block_lines ):
continue
# Ignore beginning and last line: they don't contain anything.
internal_block_code = '''\n'''.join(block_lines[line_idx:-1] )
indent = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
# We have two categories of import key: list or _import_structure[key].append/extend
pattern = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
count = 0
reordered_blocks = []
for i in range(len(internal_blocks ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(sorted_block )
count += 1
# And we put our main block back together with its first and last line.
main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(main_blocks ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(file , '''w''' ) as f:
f.write('''\n'''.join(main_blocks ) )
def sort_imports_in_all_inits( check_only=True ):
"""simple docstring"""
failures = []
for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
if "__init__.py" in files:
result = sort_imports(os.path.join(root , '''__init__.py''' ) , check_only=check_only )
if result:
failures = [os.path.join(root , '''__init__.py''' )]
if len(failures ) > 0:
raise ValueError(F'''Would overwrite {len(failures )} files, run `make style`.''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
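# Typical invocations (assuming this script lives under the repo's utils/ directory):
#   python utils/custom_init_isort.py               # rewrite offending __init__.py files in place
#   python utils/custom_init_isort.py --check_only  # only report, raising if a rewrite is needed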
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline ( DiffusionPipeline ):
'''simple docstring'''
def __init__( self , speech_model : WhisperForConditionalGeneration , speech_processor : WhisperProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ) -> None:
"""simple docstring"""
if slice_size == "auto":
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size )
def disable_attention_slicing( self ) -> None:
"""simple docstring"""
self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self , audio , sampling_rate: int = 16_000 , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
"""simple docstring"""
inputs = self.speech_processor.feature_extractor(
audio , return_tensors='pt' , sampling_rate=sampling_rate ).input_features.to(self.device )
predicted_ids = self.speech_model.generate(inputs , max_length=480_000 )
prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
0
]
if isinstance(prompt , str ):
batch_size = 1
elif isinstance(prompt , list ):
batch_size = len(prompt )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(callback_steps )}.""" )
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
bs_embed , seq_len , _ = text_embeddings.shape
text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [''] * batch_size
elif type(prompt ) is not type(negative_prompt ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
F""" {type(prompt )}.""" )
elif isinstance(negative_prompt , str ):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
uncond_tokens = negative_prompt
max_length = text_input_ids.shape[-1]
uncond_input = self.tokenizer(
uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = uncond_embeddings.shape[1]
uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1 )
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
latents = torch.randn(latents_shape , generator=generator , device='cpu' , dtype=latents_dtype ).to(
self.device )
else:
latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
latents = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(num_inference_steps )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
timesteps_tensor = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs['eta'] = eta
for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
# predict the noise residual
noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i , t , latents )
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents ).sample
image = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
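# A hedged usage sketch for the pipeline above; the checkpoint names are illustrative
# assumptions and `waveform` must be a 1-D numpy array sampled at `sampling_rate`.
# model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
# processor = WhisperProcessor.from_pretrained("openai/whisper-small")
# pipe = SpeechToImagePipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", speech_model=model, speech_processor=processor)
# image = pipe(audio=waveform, sampling_rate=16_000).images[0]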
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow( arr: list[float] ) -> list[float]:
"""simple docstring"""
result = []
arr_size = len(arr )
for i in range(arr_size ):
next_element: float = -1
for j in range(i + 1 , arr_size ):
if arr[i] < arr[j]:
next_element = arr[j]
break
result.append(next_element )
return result
def __UpperCamelCase ( _A : list[float] ) ->list[float]:
"""simple docstring"""
lowerCamelCase_ =[]
for i, outer in enumerate(_A ):
lowerCamelCase_ =-1
for inner in arr[i + 1 :]:
if outer < inner:
lowerCamelCase_ =inner
break
result.append(_A )
return result
def __UpperCamelCase ( _A : list[float] ) ->list[float]:
"""simple docstring"""
lowerCamelCase_ =len(_A )
lowerCamelCase_ =[]
lowerCamelCase_ =[-1] * arr_size
for index in reversed(range(_A ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowerCamelCase_ =stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__A : Any = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 75 |
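# --- Illustrative aside ---
# A compact, hand-checked sketch of the monotonic-stack idea behind
# next_greatest_element above. The input array is arbitrary; scanning
# right-to-left, the stack keeps only values that can still be an answer.
def next_greater(arr: list[int]) -> list[int]:
    result = [-1] * len(arr)
    stack: list[int] = []
    for i in range(len(arr) - 1, -1, -1):
        while stack and stack[-1] <= arr[i]:
            stack.pop()  # values <= arr[i] can never be the next-greater again
        if stack:
            result[i] = stack[-1]
        stack.append(arr[i])
    return result

print(next_greater([2, 7, 3, 5, 4, 6, 8]))  # [7, 8, 5, 6, 6, 8, -1]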
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__A : Optional[Any] = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
__A : Tuple = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
__A : str = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def __UpperCamelCase ( _A : List[Any] , _A : Union[str, Any] ) ->Dict:
"""simple docstring"""
return float((preds == labels).mean() )
def __UpperCamelCase ( _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any]="binary" ) ->List[Any]:
"""simple docstring"""
lowerCamelCase_ =simple_accuracy(_A , _A )
lowerCamelCase_ =float(fa_score(y_true=_A , y_pred=_A , average=_A ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCamelCase ( _A : int , _A : Union[str, Any] ) ->int:
"""simple docstring"""
lowerCamelCase_ ={}
for id_pred, label in zip(_A , _A ):
lowerCamelCase_ =f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
lowerCamelCase_ =id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCamelCase_ =[(pred, label)]
lowerCamelCase_ , lowerCamelCase_ =[], []
for question, preds_labels in question_map.items():
lowerCamelCase_ , lowerCamelCase_ =zip(*_A )
lowerCamelCase_ =fa_score(y_true=_A , y_pred=_A , average="""macro""" )
fas.append(_A )
lowerCamelCase_ =int(sum(pred == label for pred, label in preds_labels ) == len(_A ) )
ems.append(_A )
lowerCamelCase_ =float(sum(_A ) / len(_A ) )
lowerCamelCase_ =sum(_A ) / len(_A )
lowerCamelCase_ =float(fa_score(y_true=_A , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE ( datasets.Metric):
def _snake_case ( self )-> Union[str, Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def _snake_case ( self )-> Optional[Any]:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[int]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
elif self.config_name == "cb":
return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , fa_avg="""macro""" )
elif self.config_name == "record":
lowerCamelCase_ =[
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
lowerCamelCase_ ={pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 75 | 1 |
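# --- Illustrative aside ---
# The CB branch of the metric above boils down to plain accuracy plus a
# macro-averaged F1. A tiny sketch with made-up label arrays (requires
# scikit-learn, which the metric itself already imports):
from sklearn.metrics import f1_score

preds = [0, 1, 1, 0, 2]
labels = [0, 1, 0, 0, 2]

accuracy = sum(p == l for p, l in zip(preds, labels)) / len(labels)
macro_f1 = f1_score(y_true=labels, y_pred=preds, average="macro")
print({"accuracy": accuracy, "f1": macro_f1})  # accuracy == 0.8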
'''simple docstring'''
import math
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> list:
__snake_case = [True] * n
__snake_case = False
__snake_case = False
__snake_case = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
__snake_case = i * 2
while index < n:
__snake_case = False
__snake_case = index + i
__snake_case = [2]
for i in range(3 , _UpperCAmelCase , 2 ):
if is_prime[i]:
primes.append(_UpperCAmelCase )
return primes
def __UpperCAmelCase ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int:
__snake_case = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00
__snake_case = prime_sieve(_UpperCAmelCase )
__snake_case = 0
__snake_case = 0
__snake_case = primes[prime_index]
while (last_prime**2) <= limit:
__snake_case = primes[prime_index + 1]
__snake_case = last_prime**2
__snake_case = next_prime**2
# Get numbers divisible by lps(current)
__snake_case = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
__snake_case = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
__snake_case = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
__snake_case = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 69 |
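# --- Illustrative aside ---
# The prime_sieve helper above is a Sieve of Eratosthenes. A compact
# standalone version with a tiny sanity check (the bound 30 is arbitrary):
def sieve(n: int) -> list[int]:
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(n**0.5) + 1):
        if is_prime[i]:
            for j in range(i * i, n, i):  # multiples of i starting at i*i
                is_prime[j] = False
    return [i for i, flag in enumerate(is_prime) if flag]

print(sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]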
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
# initialize config
if "resnet-50" in model_name:
_lowercase : Union[str, Any] = ResNetConfig.from_pretrained('microsoft/resnet-50' )
elif "resnet-101" in model_name:
_lowercase : Optional[Any] = ResNetConfig.from_pretrained('microsoft/resnet-101' )
else:
raise ValueError('Model name should include either resnet-50 or resnet-101' )
_lowercase : Tuple = DetrConfig(use_timm_backbone=lowerCamelCase_ , backbone_config=lowerCamelCase_ )
# set label attributes
_lowercase : Any = 'panoptic' in model_name
if is_panoptic:
_lowercase : List[Any] = 250
else:
_lowercase : str = 91
_lowercase : List[Any] = 'huggingface/label-files'
_lowercase : Any = 'coco-detection-id2label.json'
_lowercase : Tuple = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
_lowercase : int = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : int = idalabel
_lowercase : Any = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowercase : List[str] = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : str = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[Any] = val
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=False ) -> str:
_lowercase : Any = ''
if is_panoptic:
_lowercase : Optional[Any] = 'detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowercase : int = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
_lowercase : Tuple = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : List[str] = in_proj_weight[:256, :]
_lowercase : Tuple = in_proj_bias[:256]
_lowercase : List[Any] = in_proj_weight[256:512, :]
_lowercase : Any = in_proj_bias[256:512]
_lowercase : int = in_proj_weight[-256:, :]
_lowercase : Optional[int] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowercase : str = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_lowercase : Optional[int] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : Union[str, Any] = in_proj_weight[:256, :]
_lowercase : Dict = in_proj_bias[:256]
_lowercase : Tuple = in_proj_weight[256:512, :]
_lowercase : Dict = in_proj_bias[256:512]
_lowercase : str = in_proj_weight[-256:, :]
_lowercase : Optional[int] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_lowercase : Tuple = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
_lowercase : Dict = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowercase : List[str] = in_proj_weight_cross_attn[:256, :]
_lowercase : Tuple = in_proj_bias_cross_attn[:256]
_lowercase : str = in_proj_weight_cross_attn[256:512, :]
_lowercase : Union[str, Any] = in_proj_bias_cross_attn[256:512]
_lowercase : List[Any] = in_proj_weight_cross_attn[-256:, :]
_lowercase : Dict = in_proj_bias_cross_attn[-256:]
def UpperCamelCase_( ) -> List[Any]:
_lowercase : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : str = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False ) -> List[Any]:
_lowercase , _lowercase : int = get_detr_config(lowerCamelCase_ )
# load original model from torch hub
_lowercase : int = {
'detr-resnet-50': 'detr_resnet50',
'detr-resnet-101': 'detr_resnet101',
}
logger.info(F'''Converting model {model_name}...''' )
_lowercase : Optional[Any] = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=lowerCamelCase_ ).eval()
_lowercase : str = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(lowerCamelCase_ ):
if is_panoptic:
_lowercase : str = 'detr.' + src
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase_ , is_panoptic=lowerCamelCase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowercase : List[Any] = 'detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
_lowercase : Tuple = state_dict.pop(lowerCamelCase_ )
_lowercase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : Optional[Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
_lowercase : Optional[Any] = state_dict.pop(lowerCamelCase_ )
_lowercase : Union[str, Any] = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
_lowercase : Dict = state_dict.pop(lowerCamelCase_ )
_lowercase : List[str] = val
# finally, create HuggingFace model and load state dict
_lowercase : Optional[Any] = DetrForSegmentation(lowerCamelCase_ ) if is_panoptic else DetrForObjectDetection(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
# verify our conversion on an image
_lowercase : str = 'coco_panoptic' if is_panoptic else 'coco_detection'
_lowercase : Optional[int] = DetrImageProcessor(format=lowerCamelCase_ )
_lowercase : str = processor(images=prepare_img() , return_tensors='pt' )
_lowercase : Tuple = encoding['pixel_values']
_lowercase : int = detr(lowerCamelCase_ )
_lowercase : Tuple = model(lowerCamelCase_ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 | 0 |
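# --- Illustrative aside ---
# read_in_q_k_v above slices PyTorch's fused in_proj matrix into separate
# query/key/value weights. A minimal sketch of that slicing on a random
# tensor; hidden size 256 matches the DETR convention, the weights are fake.
import torch

hidden_size = 256
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused (3*h, h) matrix

q_w = in_proj_weight[:hidden_size, :]                   # first third  -> query
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]  # second third -> key
v_w = in_proj_weight[-hidden_size:, :]                  # last third   -> value
print(q_w.shape, k_w.shape, v_w.shape)  # three (256, 256) tensors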
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Any = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
__lowercase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 717 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( __A : List[str] , __A : Optional[Any] , __A : Any ) -> Dict:
'''simple docstring'''
return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def lowercase ( __A : Optional[int] , __A : int , __A : List[str] , __A : List[str]="attention" ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
snake_case : int = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
snake_case : str = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
snake_case : Optional[int] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
snake_case : Union[str, Any] = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
snake_case : Dict = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
snake_case : Union[str, Any] = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
snake_case : Any = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def lowercase ( __A : Dict , __A : Dict , __A : Union[str, Any] , __A : Union[str, Any]=False ) -> Tuple:
'''simple docstring'''
if split_mlp_wi:
snake_case : Any = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
snake_case : Union[str, Any] = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
snake_case : Optional[Any] = (wi_a, wi_a)
else:
snake_case : Optional[Any] = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
snake_case : List[Any] = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def lowercase ( __A : int , __A : Tuple , __A : Optional[Any] , __A : List[str] ) -> int:
'''simple docstring'''
return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def lowercase ( __A : dict , *, __A : int , __A : bool , __A : bool = False ) -> Tuple:
'''simple docstring'''
snake_case : int = traverse_util.flatten_dict(variables["""target"""] )
snake_case : Tuple = {"""/""".join(__A ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
snake_case : Tuple = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , __A )
snake_case : Optional[int] = collections.OrderedDict()
# Shared embeddings.
snake_case : Optional[Any] = old["""token_embedder/embedding"""]
# Encoder.
for i in range(__A ):
# Block i, layer 0 (Self Attention).
snake_case : List[Any] = tax_layer_norm_lookup(__A , __A , """encoder""" , """pre_attention_layer_norm""" )
snake_case , snake_case , snake_case , snake_case : Optional[int] = tax_attention_lookup(__A , __A , """encoder""" , """attention""" )
snake_case : List[str] = layer_norm
snake_case : Tuple = k.T
snake_case : Union[str, Any] = o.T
snake_case : Optional[Any] = q.T
snake_case : Optional[int] = v.T
# Block i, layer 1 (MLP).
snake_case : Union[str, Any] = tax_layer_norm_lookup(__A , __A , """encoder""" , """pre_mlp_layer_norm""" )
snake_case , snake_case : Optional[int] = tax_mlp_lookup(__A , __A , """encoder""" , __A )
snake_case : Tuple = layer_norm
if split_mlp_wi:
snake_case : List[Any] = wi[0].T
snake_case : List[Any] = wi[1].T
else:
snake_case : Optional[int] = wi.T
snake_case : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
snake_case : Tuple = tax_relpos_bias_lookup(
__A , __A , """encoder""" ).T
snake_case : Any = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
snake_case : Dict = tax_relpos_bias_lookup(
__A , 0 , """encoder""" ).T
snake_case : Tuple = tax_relpos_bias_lookup(
__A , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(__A ):
# Block i, layer 0 (Self Attention).
snake_case : Dict = tax_layer_norm_lookup(__A , __A , """decoder""" , """pre_self_attention_layer_norm""" )
snake_case , snake_case , snake_case , snake_case : Optional[int] = tax_attention_lookup(__A , __A , """decoder""" , """self_attention""" )
snake_case : Union[str, Any] = layer_norm
snake_case : int = k.T
snake_case : Union[str, Any] = o.T
snake_case : List[Any] = q.T
snake_case : List[str] = v.T
# Block i, layer 1 (Cross Attention).
snake_case : List[str] = tax_layer_norm_lookup(__A , __A , """decoder""" , """pre_cross_attention_layer_norm""" )
snake_case , snake_case , snake_case , snake_case : Tuple = tax_attention_lookup(__A , __A , """decoder""" , """encoder_decoder_attention""" )
snake_case : Optional[int] = layer_norm
snake_case : Dict = k.T
snake_case : List[Any] = o.T
snake_case : int = q.T
snake_case : Union[str, Any] = v.T
# Block i, layer 2 (MLP).
snake_case : str = tax_layer_norm_lookup(__A , __A , """decoder""" , """pre_mlp_layer_norm""" )
snake_case , snake_case : Optional[Any] = tax_mlp_lookup(__A , __A , """decoder""" , __A )
snake_case : Dict = layer_norm
if split_mlp_wi:
snake_case : Optional[Any] = wi[0].T
snake_case : int = wi[1].T
else:
snake_case : List[Any] = wi.T
snake_case : List[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
snake_case : Optional[Any] = tax_relpos_bias_lookup(__A , __A , """decoder""" ).T
snake_case : Union[str, Any] = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
snake_case : Any = old["""decoder/logits_dense/kernel"""].T
return new
def lowercase ( __A : List[str] , __A : bool ) -> Dict:
'''simple docstring'''
snake_case : Tuple = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
snake_case : Tuple = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
snake_case : Tuple = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
snake_case : int = state_dict["""shared.weight"""]
return state_dict
def lowercase ( __A : Optional[Any] , __A : str , __A : Tuple , __A : Any , __A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Any = checkpoints.load_tax_checkpoint(__A )
snake_case : Any = convert_tax_to_pytorch(
__A , num_layers=config.num_layers , is_encoder_only=__A , scalable_attention=__A )
snake_case : Dict = make_state_dict(__A , __A )
model.load_state_dict(__A , strict=__A )
def lowercase ( __A : str , __A : Union[str, Any] , __A : int , __A : bool = False , __A : bool = False , ) -> Optional[Any]:
'''simple docstring'''
snake_case : Any = MTaConfig.from_json_file(__A )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
snake_case : Any = UMTaEncoderModel(__A )
else:
snake_case : int = UMTaForConditionalGeneration(__A )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__A , __A , __A , __A , __A )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__A )
# Verify that we can load the checkpoint.
model.from_pretrained(__A )
print("""Done""" )
if __name__ == "__main__":
__lowercase : List[Any] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
__lowercase : Optional[Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 315 | 0 |
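# --- Illustrative aside ---
# The attention lookup above flattens T5X's per-head kernels of shape
# (d_model, n_heads, d_head) into dense (d_model, n_heads * d_head) matrices.
# A numpy sketch with made-up dimensions (d_model=8, 2 heads of size 4):
import numpy as np

k = np.arange(8 * 2 * 4, dtype=np.float32).reshape(8, 2, 4)
k_flat = np.ascontiguousarray(k).reshape(k.shape[0], k.shape[1] * k.shape[2])
print(k_flat.shape)  # (8, 8)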
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case__ : Optional[int] = logging.get_logger(__name__)
snake_case__ : Tuple = {"""vocab_file""": """spiece.model"""}
snake_case__ : int = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
snake_case__ : Any = {
"""google/bigbird-roberta-base""": 4_0_9_6,
"""google/bigbird-roberta-large""": 4_0_9_6,
"""google/bigbird-base-trivia-itc""": 4_0_9_6,
}
class snake_case ( __snake_case ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
UpperCamelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : List[str] = ["input_ids", "attention_mask"]
UpperCamelCase__ : Optional[int] = []
def __init__( self : str , lowerCamelCase_ : Dict , lowerCamelCase_ : int="<unk>" , lowerCamelCase_ : Dict="<s>" , lowerCamelCase_ : Union[str, Any]="</s>" , lowerCamelCase_ : Optional[Any]="<pad>" , lowerCamelCase_ : int="[SEP]" , lowerCamelCase_ : List[str]="[MASK]" , lowerCamelCase_ : Optional[int]="[CLS]" , lowerCamelCase_ : Optional[Dict[str, Any]] = None , **lowerCamelCase_ : Union[str, Any] , ) ->None:
'''simple docstring'''
UpperCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token
UpperCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
UpperCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
UpperCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
UpperCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token
UpperCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token
# Mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase__ = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCAmelCase__ = vocab_file
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase_ )
@property
def UpperCAmelCase ( self : int ) ->Optional[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self : Optional[Any] ) ->List[str]:
'''simple docstring'''
UpperCAmelCase__ = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ) ->Dict:
'''simple docstring'''
UpperCAmelCase__ = self.__dict__.copy()
UpperCAmelCase__ = None
return state
def __setstate__( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] ) ->List[str]:
'''simple docstring'''
UpperCAmelCase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ) ->List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ )
def UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Dict ) ->Dict:
'''simple docstring'''
return self.sp_model.piece_to_id(lowerCamelCase_ )
def UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[Any] ) ->Any:
'''simple docstring'''
UpperCAmelCase__ = self.sp_model.IdToPiece(lowerCamelCase_ )
return token
def UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Dict ) ->int:
'''simple docstring'''
UpperCAmelCase__ = []
UpperCAmelCase__ = ""
UpperCAmelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase_ ) + token
UpperCAmelCase__ = True
UpperCAmelCase__ = []
else:
current_sub_tokens.append(lowerCamelCase_ )
UpperCAmelCase__ = False
out_string += self.sp_model.decode(lowerCamelCase_ )
return out_string.strip()
def UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = None , lowerCamelCase_ : bool = True , **lowerCamelCase_ : Union[str, Any] , ) ->str:
'''simple docstring'''
UpperCAmelCase__ = kwargs.pop("""use_source_tokenizer""" , lowerCamelCase_ )
UpperCAmelCase__ = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_ ) )
UpperCAmelCase__ = []
sub_texts.append(lowerCamelCase_ )
else:
current_sub_text.append(lowerCamelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
UpperCAmelCase__ = re.sub(R""" (\[(MASK|SEP)\])""" , R"""\1""" , """ """.join(lowerCamelCase_ ) )
else:
UpperCAmelCase__ = "".join(lowerCamelCase_ )
UpperCAmelCase__ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase__ = self.clean_up_tokenization(lowerCamelCase_ )
return clean_text
else:
return text
def UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_ , """wb""" ) as fi:
UpperCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
def UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
UpperCAmelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
def UpperCAmelCase ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 392 |
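# --- Illustrative aside ---
# build_inputs_with_special_tokens above reduces to list concatenation.
# A standalone sketch with placeholder ids (65/66 are not BigBird's real
# CLS/SEP ids):
from typing import List, Optional

CLS_ID, SEP_ID = 65, 66

def with_special_tokens(ids_a: List[int], ids_b: Optional[List[int]] = None) -> List[int]:
    # single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP]
    if ids_b is None:
        return [CLS_ID] + ids_a + [SEP_ID]
    return [CLS_ID] + ids_a + [SEP_ID] + ids_b + [SEP_ID]

print(with_special_tokens([10, 11]))            # [65, 10, 11, 66]
print(with_special_tokens([10, 11], [20, 21]))  # [65, 10, 11, 66, 20, 21, 66]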
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
_A = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
_A = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
_A = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
_A = field(default=2 , metadata={"help": "Batch size for training."} )
_A = field(default=2 , metadata={"help": "Batch size for evaluation."} )
_A = field(default=0.1 , metadata={"help": "Value of weight decay."} )
_A = field(
default=10000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
_A = field(default=2e-4 , metadata={"help": "Learning rate for training."} )
_A = field(default="cosine" , metadata={"help": "Learning rate schedule type."} )
_A = field(
default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
_A = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
_A = field(
default=__snake_case , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
_A = field(default=50000 , metadata={"help": "Maximum number of training steps."} )
_A = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_A = field(default=1024 , metadata={"help": "Sequence lengths used for training."} )
_A = field(default=1 , metadata={"help": "Training seed."} )
_A = field(
default=1024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
_A = field(
default=__snake_case , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
_A = field(default=__snake_case , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
_A = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
_A = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
_A = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_A = field(default=1024 , metadata={"help": "Length of sequences to be evaluated."} )
_A = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
_A = field(default=__snake_case , metadata={"help": "Number of workers used for code evaluation."} )
_A = field(
default=__snake_case , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
_A = field(
default=__snake_case , metadata={"help": "Sample from the language model's output distribution."} )
_A = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
_A = field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} )
_A = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
_A = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
_A = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
_A = field(
default=200 , metadata={"help": "Number of completions to generate for each sample."} )
_A = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
_A = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
_A = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
_A = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class __lowercase :
_A = field(
default=__snake_case , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
_A = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
_A = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
_A = field(
default=100000 , metadata={"help": "Number of files to save per JSON output file."} )
_A = field(default="content" , metadata={"help": "Column containing text data to process."} )
_A = field(
default=1000 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
_A = field(
default=100 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
_A = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
_A = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
_A = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
_A = field(
default=__snake_case , metadata={"help": "If True, near-duplicate samples are removed."} )
_A = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class __lowercase :
_A = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
_A = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
_A = field(default="content" , metadata={"help": "Column containing text data to process."} )
_A = field(default=200000 , metadata={"help": "Number of examples to train tokenizer on."} )
_A = field(
default=32768 , metadata={"help": "Vocabulary size of the new tokenizer."} )
_A = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
_A = field(default=__snake_case , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
_A = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
_A = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
_A = field(default=__snake_case , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class __lowercase :
_A = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
_A = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
_A = field(default=__snake_case , metadata={"help": "Push saved tokenizer to the hub."} )
| 461 | 0 |
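# --- Illustrative aside ---
# Configuration dataclasses like the ones above are typically consumed via
# transformers.HfArgumentParser, which turns each field into a CLI flag.
# A hedged sketch; DemoArguments is a stand-in, not one of the configs above.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class DemoArguments:
    model_ckpt: str = field(default="gpt2", metadata={"help": "Model name or path."})
    train_batch_size: int = field(default=2, metadata={"help": "Batch size for training."})

parser = HfArgumentParser(DemoArguments)
(demo_args,) = parser.parse_args_into_dataclasses()  # e.g. `script.py --train_batch_size 8`
print(demo_args.model_ckpt, demo_args.train_batch_size)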
def UpperCamelCase ( __lowercase : list[int] ):
'''simple docstring'''
A_ : int = len(__lowercase )
A_ : List[Any] = sum(__lowercase )
A_ : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 ,n + 1 ):
A_ : Optional[Any] = True
for i in range(1 ,s + 1 ):
A_ : Tuple = False
for i in range(1 ,n + 1 ):
for j in range(1 ,s + 1 ):
A_ : Dict = dp[i][j - 1]
if arr[i - 1] <= j:
A_ : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) ,-1 ,-1 ):
if dp[n][j] is True:
A_ : List[Any] = s - 2 * j
break
return diff
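# --- Illustrative aside ---
# The function above is the minimum subset-sum difference DP. A 1-D variant
# with a hand-checkable example: splitting [1, 6, 11, 5] into {1, 5, 6} and
# {11} gives sums 12 and 11, so the best difference is 1.
def min_partition_diff(arr: list[int]) -> int:
    total = sum(arr)
    dp = [False] * (total + 1)  # dp[j]: some subset of arr sums to j
    dp[0] = True
    for x in arr:
        for j in range(total, x - 1, -1):  # descending j keeps each x used once
            dp[j] = dp[j] or dp[j - x]
    best_half = max(j for j in range(total // 2 + 1) if dp[j])
    return total - 2 * best_half

print(min_partition_diff([1, 6, 11, 5]))  # 1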
| 70 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase ( ):
'''simple docstring'''
A_ : List[Any] = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=__lowercase )
A_ : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=__lowercase )
env_command_parser(subparsers=__lowercase )
launch_command_parser(subparsers=__lowercase )
tpu_command_parser(subparsers=__lowercase )
test_command_parser(subparsers=__lowercase )
# Let's go
A_ : Optional[Any] = parser.parse_args()
if not hasattr(__lowercase ,'func' ):
parser.print_help()
exit(1 )
# Run
args.func(__lowercase )
if __name__ == "__main__":
main()
| 70 | 1 |
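# --- Illustrative aside ---
# The CLI above is plain argparse subcommand wiring: each *_command_parser
# registers a subparser and attaches a `func` default. The same pattern,
# standalone, with an invented `greet` command:
from argparse import ArgumentParser

def greet(args):
    print(f"hello, {args.name}")

parser = ArgumentParser("demo", usage="demo <command> [<args>]")
subparsers = parser.add_subparsers(help="demo command helpers")
greet_parser = subparsers.add_parser("greet")
greet_parser.add_argument("--name", default="world")
greet_parser.set_defaults(func=greet)  # how each sub-command attaches its handler

args = parser.parse_args(["greet", "--name", "accelerate"])
args.func(args)  # prints: hello, accelerate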
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
a__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
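# With this layout, `import transformers` stays cheap: _LazyModule records
# _import_structure and only imports modeling_opt (or the TF/Flax variants) the
# first time one of the listed OPT symbols is actually accessed.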
| 477 |
import requests
def lowercase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> None:
_snake_case : Union[str, Any] = {"""Content-Type""": """application/json"""}
_snake_case : Tuple = requests.post(SCREAMING_SNAKE_CASE__ , json={"""text""": message_body} , headers=SCREAMING_SNAKE_CASE__ )
if response.status_code != 200:
_snake_case : Any = (
"""Request to slack returned an error """
F'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 477 | 1 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class __lowerCAmelCase :
def __init__( self , snake_case = None ) -> None:
"""simple docstring"""
if components is None:
a__ : List[str] = []
a__ : Optional[int] = list(__a )
def __len__( self ) -> int:
"""simple docstring"""
return len(self.__components )
def __str__( self ) -> str:
"""simple docstring"""
return "(" + ",".join(map(__a , self.__components ) ) + ")"
def __add__( self , snake_case ) -> Vector:
"""simple docstring"""
a__ : Optional[Any] = len(self )
if size == len(__a ):
a__ : Optional[int] = [self.__components[i] + other.component(__a ) for i in range(__a )]
return Vector(__a )
else:
raise Exception("must have the same size" )
def __sub__( self , snake_case ) -> Vector:
"""simple docstring"""
a__ : Optional[Any] = len(self )
if size == len(__a ):
a__ : Optional[int] = [self.__components[i] - other.component(__a ) for i in range(__a )]
return Vector(__a )
else: # error case
raise Exception("must have the same size" )
@overload
def __mul__( self , snake_case ) -> Vector:
"""simple docstring"""
...
@overload
def __mul__( self , snake_case ) -> float:
"""simple docstring"""
...
def __mul__( self , snake_case ) -> float | Vector:
"""simple docstring"""
if isinstance(__a , (float, int) ):
a__ : str = [c * other for c in self.__components]
return Vector(__a )
elif isinstance(__a , __a ) and len(self ) == len(__a ):
a__ : List[Any] = len(self )
a__ : Dict = [self.__components[i] * other.component(__a ) for i in range(__a )]
return sum(__a )
else: # error case
raise Exception("invalid operand!" )
def _snake_case ( self ) -> Vector:
"""simple docstring"""
return Vector(self.__components )
def _snake_case ( self , snake_case ) -> float:
"""simple docstring"""
if isinstance(__a , __a ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("index out of range" )
def _snake_case ( self , snake_case , snake_case ) -> None:
"""simple docstring"""
assert -len(self.__components ) <= pos < len(self.__components )
a__ : int = value
def _snake_case ( self ) -> float:
"""simple docstring"""
if len(self.__components ) == 0:
raise Exception("Vector is empty" )
a__ : Tuple = [c**2 for c in self.__components]
return math.sqrt(sum(__a ) )
def _snake_case ( self , snake_case , snake_case = False ) -> float:
"""simple docstring"""
a__ : Tuple = self * other
a__ : Optional[int] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _A ( lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase )
return Vector([0] * dimension )
def _A ( lowerCamelCase , lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (isinstance(lowerCamelCase , lowerCamelCase ))
a__ : Any = [0] * dimension
a__ : int = 1
return Vector(lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and isinstance(lowerCamelCase , lowerCamelCase )
and (isinstance(lowerCamelCase , (int, float) ))
)
return x * scalar + y
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
random.seed(lowerCamelCase )
a__ : List[Any] = [random.randint(lowerCamelCase , lowerCamelCase ) for _ in range(lowerCamelCase )]
return Vector(lowerCamelCase )
class __lowerCAmelCase :
def __init__( self , snake_case , snake_case , snake_case ) -> None:
"""simple docstring"""
a__ : Union[str, Any] = matrix
a__ : int = w
a__ : str = h
def __str__( self ) -> str:
"""simple docstring"""
a__ : Dict = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , snake_case ) -> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
a__ : Tuple = []
for i in range(self.__height ):
a__ : List[Any] = [
self.__matrix[i][j] + other.component(__a , __a )
for j in range(self.__width )
]
matrix.append(__a )
return Matrix(__a , self.__width , self.__height )
else:
raise Exception("matrix must have the same dimension!" )
def __sub__( self , snake_case ) -> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
a__ : str = []
for i in range(self.__height ):
a__ : List[str] = [
self.__matrix[i][j] - other.component(__a , __a )
for j in range(self.__width )
]
matrix.append(__a )
return Matrix(__a , self.__width , self.__height )
else:
raise Exception("matrices must have the same dimension!" )
@overload
def __mul__( self , snake_case ) -> Matrix:
"""simple docstring"""
...
@overload
def __mul__( self , snake_case ) -> Vector:
"""simple docstring"""
...
def __mul__( self , snake_case ) -> Vector | Matrix:
"""simple docstring"""
if isinstance(__a , __a ): # matrix-vector
if len(__a ) == self.__width:
a__ : Tuple = zero_vector(self.__height )
for i in range(self.__height ):
a__ : Union[str, Any] = [
self.__matrix[i][j] * other.component(__a )
for j in range(self.__width )
]
ans.change_component(__a , sum(__a ) )
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!" )
elif isinstance(__a , (int, float) ): # matrix-scalar
a__ : str = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(__a , self.__width , self.__height )
        raise Exception("invalid operand!" )
def _snake_case ( self ) -> int:
"""simple docstring"""
return self.__height
def _snake_case ( self ) -> int:
"""simple docstring"""
return self.__width
def _snake_case ( self , snake_case , snake_case ) -> float:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds" )
def _snake_case ( self , snake_case , snake_case , snake_case ) -> None:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
a__ : List[Any] = value
else:
raise Exception("change_component: indices out of bounds" )
def _snake_case ( self , snake_case , snake_case ) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception("Matrix is not square" )
a__ : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__a ) ):
a__ : Tuple = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__a , self.__width - 1 , self.__height - 1 ).determinant()
def _snake_case ( self , snake_case , snake_case ) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__a , __a )
else:
raise Exception("Indices out of bounds" )
def _snake_case ( self ) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if self.__height < 1:
raise Exception("Matrix has no element" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
a__ : Any = [
self.__matrix[0][y] * self.cofactor(0 , __a ) for y in range(self.__width )
]
return sum(__a )
def _A ( lowerCamelCase ):
a__ : list[list[float]] = [[0] * n for _ in range(lowerCamelCase )]
return Matrix(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
random.seed(lowerCamelCase )
a__ : list[list[float]] = [
[random.randint(lowerCamelCase , lowerCamelCase ) for _ in range(lowerCamelCase )] for _ in range(lowerCamelCase )
]
return Matrix(lowerCamelCase , lowerCamelCase , lowerCamelCase )
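# A minimal usage sketch for the linear-algebra helpers above (illustrative only;
# it assumes the two classes keep the names their method bodies reference,
# i.e. Vector and Matrix):
#   v = Vector([1.0, 2.0, 3.0])
#   w = Vector([4.0, 5.0, 6.0])
#   str(v + w)        # -> "(5.0,7.0,9.0)"
#   v * w             # dot product -> 32.0
#   m = Matrix([[1, 2], [3, 4]], 2, 2)
#   m.determinant()   # -> -2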
| 717 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : Dict = old_name
if "patch_embed" in old_name:
a__ , a__ , a__ : Union[str, Any] = old_name.split("." )
if layer == "0":
a__ : Union[str, Any] = old_name.replace("0" , "convolution1" )
elif layer == "1":
a__ : Dict = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
a__ : List[str] = old_name.replace("3" , "convolution2" )
else:
a__ : Optional[Any] = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" , lowerCamelCase ):
a__ : List[str] = r"\b\d{2}\b"
if bool(re.search(lowerCamelCase , lowerCamelCase ) ):
a__ : Optional[int] = re.search(r"\d\.\d\d." , lowerCamelCase ).group()
else:
a__ : Any = re.search(r"\d\.\d." , lowerCamelCase ).group()
if int(match[0] ) < 6:
a__ : List[Any] = old_name.replace(lowerCamelCase , "" )
a__ : int = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
a__ : List[Any] = "intermediate_stages." + trimmed_name
else:
a__ : Union[str, Any] = old_name.replace(lowerCamelCase , "" )
if int(match[2] ) < num_meta4D_last_stage:
a__ : Optional[Any] = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
a__ : Union[str, Any] = str(int(match[2] ) - num_meta4D_last_stage )
a__ : str = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
a__ : List[str] = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
a__ : Optional[int] = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
a__ : List[str] = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
a__ : Any = trimmed_name.replace("fc2" , "linear_out" )
a__ : Any = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." , lowerCamelCase ):
a__ : List[str] = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
a__ : str = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
a__ : str = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
a__ : Any = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
a__ : Optional[int] = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
a__ : Tuple = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
a__ : Optional[int] = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
a__ : Tuple = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
a__ : Union[str, Any] = new_name.replace("norm" , "layernorm" )
a__ : Optional[int] = "efficientformer." + new_name
else:
a__ : List[Any] = "efficientformer.encoder." + new_name
return new_name
def _A ( lowerCamelCase , lowerCamelCase ):
for key in checkpoint.copy().keys():
a__ : Optional[Any] = checkpoint.pop(lowerCamelCase )
a__ : Dict = val
return checkpoint
def _A ( ):
a__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
a__ : List[Any] = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return image
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : List[str] = torch.load(lowerCamelCase , map_location="cpu" )["model"]
a__ : str = EfficientFormerConfig.from_json_file(lowerCamelCase )
a__ : int = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase )
a__ : Optional[Any] = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
a__ : Tuple = config.depths[-1] - config.num_metaad_blocks + 1
a__ : Union[str, Any] = convert_torch_checkpoint(lowerCamelCase , lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
a__ : Dict = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
a__ : str = prepare_img()
a__ : Dict = 256
a__ : Union[str, Any] = 224
a__ : List[str] = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
a__ : List[str] = processor(images=lowerCamelCase , return_tensors="pt" ).pixel_values
# original processing pipeline
a__ : List[str] = Compose(
[
Resize(lowerCamelCase , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(lowerCamelCase ),
ToTensor(),
Normalize(lowerCamelCase , lowerCamelCase ),
] )
a__ : List[Any] = image_transforms(lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(lowerCamelCase , lowerCamelCase )
a__ : Optional[int] = model(lowerCamelCase )
a__ : Any = outputs.logits
a__ : Optional[Any] = (1, 1000)
if "l1" in model_name:
a__ : Tuple = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
a__ : int = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
a__ : Optional[Any] = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(lowerCamelCase )
print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=lowerCamelCase , )
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
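# Example invocation (file names are hypothetical and purely illustrative; the
# flags mirror the argparse arguments defined above):
#   python convert_efficientformer.py \
#     --pytorch_model_path efficientformer_l1_300d.pth \
#     --config_file efficientformer_l1_config.json \
#     --pytorch_dump_path efficientformer-l1-300 \
#     --push_to_hub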
| 629 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self : Dict , _UpperCAmelCase : UNetaDModel , _UpperCAmelCase : KarrasVeScheduler ) -> Any:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self : Optional[int] , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , **_UpperCAmelCase : Any , ) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
UpperCAmelCase_ = self.unet.config.sample_size
UpperCAmelCase_ = (batch_size, 3, img_size, img_size)
UpperCAmelCase_ = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
UpperCAmelCase_ = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
UpperCAmelCase_ = self.scheduler.schedule[t]
UpperCAmelCase_ = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
UpperCAmelCase_ , UpperCAmelCase_ = self.scheduler.add_noise_to_input(_UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase_ = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
UpperCAmelCase_ = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase_ = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
UpperCAmelCase_ = self.scheduler.step_correct(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , step_output.prev_sample , step_output["derivative"] , )
UpperCAmelCase_ = step_output.prev_sample
UpperCAmelCase_ = (sample / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
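# A minimal usage sketch (illustrative: the checkpoint id is a placeholder and the
# pipeline class above is assumed to be exported under a public name such as
# KarrasVePipeline):
#   pipe = KarrasVePipeline.from_pretrained("some-org/karras-ve-checkpoint")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
# The keyword names follow __call__ above; `images` holds PIL images when
# output_type == "pil".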
| 82 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__lowerCAmelCase ={
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
__lowerCAmelCase =AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = create_model(
"HTSAT-tiny" , "roberta" , _lowerCAmelCase , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=_lowerCAmelCase , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def __UpperCamelCase ( _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = {}
UpperCAmelCase = R".*sequential.(\d+).*"
UpperCAmelCase = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase = key.replace(_lowerCAmelCase , _lowerCAmelCase )
if re.match(_lowerCAmelCase , _lowerCAmelCase ):
# replace sequential layers with list
UpperCAmelCase = re.match(_lowerCAmelCase , _lowerCAmelCase ).group(1 )
UpperCAmelCase = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(_lowerCAmelCase )//3}.linear.''' )
elif re.match(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase = int(re.match(_lowerCAmelCase , _lowerCAmelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase = 1 if projecton_layer == 0 else 2
UpperCAmelCase = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase = value
UpperCAmelCase = mixed_qkv.size(0 ) // 3
UpperCAmelCase = mixed_qkv[:qkv_dim]
UpperCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase = query_layer
UpperCAmelCase = key_layer
UpperCAmelCase = value_layer
else:
UpperCAmelCase = value
return model_state_dict
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = init_clap(_lowerCAmelCase , enable_fusion=_lowerCAmelCase )
clap_model.eval()
UpperCAmelCase = clap_model.state_dict()
UpperCAmelCase = rename_state_dict(_lowerCAmelCase )
UpperCAmelCase = ClapConfig()
UpperCAmelCase = enable_fusion
UpperCAmelCase = ClapModel(_lowerCAmelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
transformers_config.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
__lowerCAmelCase =parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
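# Example invocation (paths are hypothetical, mirroring the argparse flags above):
#   python convert_clap_checkpoint.py \
#     --checkpoint_path CLAP/checkpoint.pt \
#     --pytorch_dump_folder_path clap-htsat-tiny \
#     --config_path clap_config.json \
#     --enable_fusion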
| 333 | 0 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCamelCase ( datasets.BuilderConfig ):
lowerCAmelCase : Optional[datasets.Features] = None
class UpperCamelCase ( datasets.ArrowBasedBuilder ):
lowerCAmelCase : Dict = PandasConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , UpperCAmelCase__ ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
A__ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase__ , (str, list, tuple) ):
A__ = data_files
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A__ = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
A__ = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A__ = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def __A ( self , UpperCAmelCase__ ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema )
return pa_table
def __A ( self , UpperCAmelCase__ ):
for i, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ):
with open(UpperCAmelCase__ , "rb" ) as f:
A__ = pa.Table.from_pandas(pd.read_pickle(UpperCAmelCase__ ) )
yield i, self._cast_table(UpperCAmelCase__ )
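# A minimal usage sketch (assuming this builder ships as the packaged "pandas"
# module of `datasets`; the pickle path is hypothetical):
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files="frames/train.pkl")
# Each data file is read with pd.read_pickle and yielded as an Arrow table by the
# table-generation method above.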
| 232 |
UpperCAmelCase_ : List[str] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCAmelCase_ : Any = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCAmelCase_ : Dict = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def UpperCamelCase ( _A : int , _A : int , _A : int )-> str:
"""simple docstring"""
assert len(str(_A ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
A__ = year // 100
A__ = (5 * (century % 4) + 2) % 7
A__ = year % 100
A__ = centurian % 12
A__ = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
A__ = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)  # century years are leap only when divisible by 400
else DOOMSDAY_LEAP[month - 1]
)
A__ = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
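# A quick sanity check of the Doomsday routine above (illustrative; the weekday
# and doomsday tables are assumed to resolve under the names the body references):
#   UpperCamelCase(2000, 1, 1)  # -> 'Saturday'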
| 232 | 1 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase__ = HUGGINGFACE_HUB_CACHE
lowerCamelCase__ = """config.json"""
lowerCamelCase__ = """diffusion_pytorch_model.bin"""
lowerCamelCase__ = """diffusion_flax_model.msgpack"""
lowerCamelCase__ = """model.onnx"""
lowerCamelCase__ = """diffusion_pytorch_model.safetensors"""
lowerCamelCase__ = """weights.pb"""
lowerCamelCase__ = """https://huggingface.co"""
lowerCamelCase__ = default_cache_path
lowerCamelCase__ = """diffusers_modules"""
lowerCamelCase__ = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase__ = ["""fp16""", """non-ema"""]
lowerCamelCase__ = """.self_attn"""
| 455 |
"""simple docstring"""
import numpy as np
def _snake_case ( __snake_case : np.ndarray ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
def _snake_case ( __snake_case : np.ndarray ):
"""simple docstring"""
return vector * sigmoid(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
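# A quick check of the two activations above (illustrative: the second function
# shadows the first's generated name, so assume the original names were
# sigmoid and sigmoid_linear_unit):
#   sigmoid(np.array([0.0]))              # -> array([0.5])
#   sigmoid_linear_unit(np.array([0.0]))  # -> array([0.]), since SiLU(x) = x * sigmoid(x)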
| 88 | 0 |
__A = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 62 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__A = NewType('DataClass', Any)
__A = NewType('DataClassType', Any)
def __A ( _lowercase ):
'''simple docstring'''
if isinstance(_lowercase , _lowercase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def __A ( _lowercase ):
'''simple docstring'''
_A = {str(_lowercase ): choice for choice in choices}
return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase )
def __A ( *,
_lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ):
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
_A = {}
if aliases is not None:
_A = aliases
if help is not None:
_A = help
return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase )
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = 42
def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
_A = ArgumentDefaultsHelpFormatter
super().__init__(**__A )
if dataclasses.is_dataclass(__A ):
_A = [dataclass_types]
_A = list(__A )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__A )
@staticmethod
def __A ( __A: ArgumentParser , __A: dataclasses.Field ) -> str:
_A = f"""--{field.name}"""
_A = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __A ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
_A = kwargs.pop('''aliases''' , [] )
if isinstance(__A , __A ):
_A = [aliases]
_A = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(__A ) not in field.type.__args__:
# filter `str` in Union
_A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_A = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_A = (
field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1]
)
_A = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_A = {}
if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )):
if origin_type is Literal:
_A = field.type.__args__
else:
_A = [x.value for x in field.type]
_A = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
_A = field.default
else:
_A = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_A = copy(__A )
# Hack because type=bool in argparse does not behave as we want.
_A = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
_A = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_A = default
# This tells argparse we accept 0 or 1 value after --field_name
_A = '''?'''
# This is the value that will get picked if we do --field_name (without value)
_A = True
elif isclass(__A ) and issubclass(__A , __A ):
_A = field.type.__args__[0]
_A = '''+'''
if field.default_factory is not dataclasses.MISSING:
_A = field.default_factory()
elif field.default is dataclasses.MISSING:
_A = True
else:
_A = field.type
if field.default is not dataclasses.MISSING:
_A = field.default
elif field.default_factory is not dataclasses.MISSING:
_A = field.default_factory()
else:
_A = True
parser.add_argument(__A , *__A , **__A )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_A = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A )
def __A ( self: Dict , __A: DataClassType ) -> List[Any]:
if hasattr(__A , '''_argument_group_name''' ):
_A = self.add_argument_group(dtype._argument_group_name )
else:
_A = self
try:
_A = get_type_hints(__A )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ):
_A = '''.'''.join(map(__A , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                '''support Python versions lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__A ):
if not field.init:
continue
_A = type_hints[field.name]
self._parse_dataclass_field(__A , __A )
def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
_A = []
if args_filename:
args_files.append(Path(__A ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_A = ArgumentParser()
args_file_parser.add_argument(__A , type=__A , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
_A ,_A = args_file_parser.parse_known_args(args=__A )
_A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A )
if cmd_args_file_paths:
args_files.extend([Path(__A ) for p in cmd_args_file_paths] )
_A = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_A = file_args + args if args is not None else file_args + sys.argv[1:]
_A ,_A = self.parse_known_args(args=__A )
_A = []
for dtype in self.dataclass_types:
_A = {f.name for f in dataclasses.fields(__A ) if f.init}
_A = {k: v for k, v in vars(__A ).items() if k in keys}
for k in keys:
delattr(__A , __A )
_A = dtype(**__A )
outputs.append(__A )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__A )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]:
_A = set(args.keys() )
_A = []
for dtype in self.dataclass_types:
_A = {f.name for f in dataclasses.fields(__A ) if f.init}
_A = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
_A = dtype(**__A )
outputs.append(__A )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" )
return tuple(__A )
def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]:
with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file:
_A = json.loads(open_json_file.read() )
_A = self.parse_dict(__A , allow_extra_keys=__A )
return tuple(__A )
def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]:
_A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A )
return tuple(__A )
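# A minimal usage sketch (assuming the class above is transformers.HfArgumentParser
# and that its public method names, e.g. parse_args_into_dataclasses, are
# unobfuscated; the dataclass below is purely illustrative):
#   @dataclasses.dataclass
#   class ExampleArguments:
#       learning_rate: float = dataclasses.field(
#           default=3e-5, metadata={"help": "Initial learning rate."}
#       )
#       push_to_hub: bool = dataclasses.field(default=True)
#
#   parser = HfArgumentParser(ExampleArguments)
#   (example_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4"])
#   # because push_to_hub defaults to True, a --no_push_to_hub complement is added too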
| 62 | 1 |
from __future__ import annotations
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(lowerCamelCase_ ):
print(F"""{i}\t\t{d}""" )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for j in range(lowerCamelCase_ ):
        u , v , w = (graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
return True
return False
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[int] = [float("inf" )] * vertex_count
lowercase__ : str = 0.0
for _ in range(vertex_count - 1 ):
for j in range(lowerCamelCase_ ):
            u , v , w = (graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
lowercase__ : int = distance[u] + w
lowercase__ : Dict = check_negative_cycle(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if negative_cycle_exists:
raise Exception("Negative cycle found" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input('''Enter number of vertices: ''').strip())
lowerCAmelCase__ = int(input('''Enter number of edges: ''').strip())
lowerCAmelCase__ = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowerCAmelCase__ = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowerCAmelCase__ = {"src": src, "dst": dest, "weight": weight}
lowerCAmelCase__ = int(input('''\nEnter shortest path source:''').strip())
lowerCAmelCase__ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
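# A minimal usage sketch (illustrative; edge dicts use the {"src", "dst", "weight"}
# keys read by the interactive block above, and the tuple-unpacking relaxation is
# assumed to bind u, v, w as written):
#   graph = [
#       {"src": 0, "dst": 1, "weight": 4},
#       {"src": 0, "dst": 2, "weight": 1},
#       {"src": 2, "dst": 1, "weight": 2},
#   ]
#   bellman_ford(graph, 3, 3, 0)  # -> [0.0, 3.0, 1.0]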
| 496 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if n == 1 or not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return 0
elif n == 2:
return 1
else:
_lowercase : List[str] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : Tuple = 0
_lowercase : List[str] = 2
while digits < n:
index += 1
_lowercase : Optional[int] = len(str(fibonacci(lowerCamelCase_ ) ) )
return index
def UpperCamelCase_( lowerCamelCase_ = 1000 ) -> int:
return fibonacci_digits_index(lowerCamelCase_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
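# Quick sanity check (illustrative; `solution` is the intended name of the last
# function above, as the __main__ guard suggests):
#   solution(3)  # -> 12, since F(12) = 144 is the first 3-digit Fibonacci number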
| 89 | 0 |
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase = 1000 ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = 1, 1
UpperCAmelCase__ : List[str] = []
for i in range(1 , n + 1 ):
UpperCAmelCase__ : List[str] = prev_numerator + 2 * prev_denominator
UpperCAmelCase__ : List[Any] = prev_numerator + prev_denominator
if len(str(__UpperCamelCase ) ) > len(str(__UpperCamelCase ) ):
result.append(__UpperCamelCase )
UpperCAmelCase__ : List[str] = numerator
UpperCAmelCase__ : int = denominator
return len(__UpperCamelCase )
if __name__ == "__main__":
print(F"{solution() = }")
| 194 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class __lowercase ( __lowerCamelCase ):
snake_case_ = """timm_backbone"""
def __init__( self : List[str] ,A : Any=None ,A : List[Any]=3 ,A : Any=True ,A : Union[str, Any]=True ,A : List[Any]=None ,**A : Optional[int] ,):
'''simple docstring'''
super().__init__(**A )
UpperCAmelCase__ : Optional[int] = backbone
UpperCAmelCase__ : Dict = num_channels
UpperCAmelCase__ : Optional[int] = features_only
UpperCAmelCase__ : Tuple = use_pretrained_backbone
UpperCAmelCase__ : str = True
UpperCAmelCase__ : List[str] = out_indices if out_indices is not None else (-1,)
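# A minimal usage sketch (assuming the class above is transformers.TimmBackboneConfig;
# the architecture name is illustrative):
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
# `backbone` names a timm architecture and `out_indices` selects which feature
# stages are returned, matching the keyword handling above.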
| 194 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
UpperCamelCase : Optional[Any] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !" ("I love camembert!")
UpperCamelCase : Optional[Any] = model(A_ )["last_hidden_state"]
UpperCamelCase : List[str] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
UpperCamelCase : Optional[Any] = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 629 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ :
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=0.6 , A_=None , ):
'''simple docstring'''
UpperCamelCase : Dict = parent
UpperCamelCase : Union[str, Any] = batch_size
UpperCamelCase : Any = image_size
UpperCamelCase : Tuple = patch_size
UpperCamelCase : Union[str, Any] = num_channels
UpperCamelCase : Tuple = is_training
UpperCamelCase : int = use_labels
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : Optional[Any] = num_hidden_layers
UpperCamelCase : str = num_attention_heads
UpperCamelCase : Optional[int] = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : List[str] = hidden_dropout_prob
UpperCamelCase : str = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = type_sequence_label_size
UpperCamelCase : Tuple = initializer_range
UpperCamelCase : Any = mask_ratio
UpperCamelCase : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase : Union[str, Any] = (image_size // patch_size) ** 2
UpperCamelCase : int = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : int = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : str = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase( self ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Any = ViTMAEModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Optional[Any] = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Tuple = ViTMAEForPreTraining(A_ )
model.to(A_ )
model.eval()
UpperCamelCase : List[str] = model(A_ )
UpperCamelCase : Any = (self.image_size // self.patch_size) ** 2
UpperCamelCase : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase : str = 1
UpperCamelCase : Union[str, Any] = ViTMAEForPreTraining(A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : Union[str, Any] = model(A_ )
UpperCamelCase : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = config_and_inputs
UpperCamelCase : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCAmelCase :Any = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_UpperCAmelCase :int = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :Any = False
_UpperCAmelCase :Optional[int] = False
_UpperCAmelCase :Dict = False
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = ViTMAEModelTester(self )
UpperCamelCase : Any = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Any = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[str] = model_class(A_ )
UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : List[str] = [*signature.parameters.keys()]
UpperCamelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A_ )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase : Union[str, Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase : Tuple = torch.from_numpy(A_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase : Any = pt_noise
super().check_pt_tf_models(A_ , A_ , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[Any] = model_class(A_ )
model.to(A_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase : str = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase : Any = outputs[0].cpu().numpy()
UpperCamelCase : str = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
UpperCamelCase : Optional[int] = model_class.from_pretrained(A_ )
model.to(A_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase : Tuple = model(**self._prepare_for_class(A_ , A_ ) )
# Make sure we don't have nans
UpperCamelCase : Dict = after_outputs[0].cpu().numpy()
UpperCamelCase : int = 0
UpperCamelCase : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_ , 1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : List[str] = ViTMAEModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A_ ( ) -> Optional[int]:
UpperCamelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase : Union[str, Any] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(A_ )
UpperCamelCase : Any = self.default_image_processor
UpperCamelCase : Optional[Any] = prepare_img()
UpperCamelCase : int = image_processor(images=A_ , return_tensors="pt" ).to(A_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase : Dict = ViTMAEConfig()
UpperCamelCase : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase : Optional[Any] = model(**A_ , noise=torch.from_numpy(A_ ).to(device=A_ ) )
# verify the logits
UpperCamelCase : str = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase : str = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(A_ ) , atol=1e-4 ) )
| 629 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def test_transform_and_reverse(self ):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        inp = tokenizer("This is me" , return_tensors="pt" )
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        output = model.generate(**inp )
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname )
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            output_from_pretrained = model_reloaded.generate(**inp )
            self.assertTrue(torch.allclose(output , output_from_pretrained ) )
    def test_error_save_pretrained(self ):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError ):
                model.save_pretrained(tmpdirname )
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname ) | 700 |
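# For context, the user-facing BetterTransformer round trip exercised above
# (transformers/optimum API):
#     model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#     model = model.to_bettertransformer()       # swap in fused attention kernels
#     ...                                        # run inference
#     model = model.reverse_bettertransformer()  # restore canonical modules before saving
#     model.save_pretrained("out_dir")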
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
def get_resize_output_image_size(input_image , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
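# Worked example of the helper above: a 480x640 input mapped to a 384x384 target with
# keep_aspect_ratio=True and multiple=32 scales both sides by 384/480 = 0.8 (the scale
# closer to 1), giving dimensions that are multiples of 32:
#     get_resize_output_image_size(image, output_size=384, keep_aspect_ratio=True, multiple=32)
#     # -> (384, 512) for a 480x640 input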
class lowercase ( A__ ):
"""simple docstring"""
_a = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , keep_aspect_ratio = False , ensure_multiple_of = 1 , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self , image , size , keep_aspect_ratio = False , ensure_multiple_of = 1 , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(
            image , output_size=(size["height"], size["width"]) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale(self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize(self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess(self , images , do_resize = None , size = None , keep_aspect_ratio = None , ensure_multiple_of = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation(self , outputs , target_sizes = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation | 280 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCamelCase = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
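# Usage note (standard transformers lazy-module behaviour assumed): the names listed in
# _import_structure are only materialized on first attribute access, e.g.
#     from transformers import UniSpeechModel  # resolves lazily via _LazyModule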
| 71 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module ):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup(self ):
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__(self , hidden_states ):
        batch , height , width , channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxDownsample2D(nn.Module ):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup(self ):
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__(self , hidden_states ):
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxResnetBlock2D(nn.Module ):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32
    def setup(self ):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv1 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        self.time_emb_proj = nn.Dense(out_channels , dtype=self.dtype )
        self.norm2 = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.conv2 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
    def __call__(self , hidden_states , temb , deterministic=True ):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conv1(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb , 1 ) , 1 )
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states , deterministic )
        hidden_states = self.conv2(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
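# Minimal usage sketch (assumption: the class/field names follow diffusers' resnet_flax.py,
# as reconstructed above):
#     import jax, jax.numpy as jnp
#     block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#     x = jnp.zeros((1, 16, 16, 32))                  # NHWC layout, as Flax convs expect
#     temb = jnp.zeros((1, 128))                      # time embedding
#     params = block.init(jax.random.PRNGKey(0), x, temb)
#     y = block.apply(params, x, temb)                # -> (1, 16, 16, 64)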
| 291 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase_ = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
lowerCamelCase_ = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def bytes_to_unicode():
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    def vocab_size(self ):
        return len(self.encoder )
    def get_vocab(self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
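    # Worked mini-example of the merge loop above (illustrative toy vocabulary, not BART's):
    # with bpe_ranks = {("h","e"): 0, ("he","l"): 1} the token "hello" evolves as
    #     ("h","e","l","l","o") -> ("he","l","l","o") -> ("hel","l","o")
    # after which no remaining pair has a rank, so bpe() returns "hel l o".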
    def _tokenize(self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id(self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token(self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string(self , tokens ):
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization(self , text , is_split_into_words = False , **kwargs ):
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 703 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self ):
        return 32
    @property
    def time_input_dim(self ):
        return 32
    @property
    def time_embed_dim(self ):
        return self.time_input_dim * 4
    @property
    def renderer_dim(self ):
        return 8
    @property
    def dummy_tokenizer(self ):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer
    @property
    def dummy_text_encoder(self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(config )
    @property
    def dummy_prior(self ):
        torch.manual_seed(0 )
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs )
        return model
    @property
    def dummy_renderer(self ):
        torch.manual_seed(0 )
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components(self ):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp" , num_train_timesteps=1_024 , prediction_type="sample" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array([0.00039216] * 9 )  # nine identical reference values
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent(self ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self ):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt(self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCamelCase_ (unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e(self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy" )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e" )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            "a shark" , generator=generator , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
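    # Follow-up sketch (assumption: diffusers' export_to_gif utility is available): the
    # decoded frames can be written out as a turnaround GIF for quick inspection.
    #     from diffusers.utils import export_to_gif
    #     frames = pipe("a shark", guidance_scale=15.0, frame_size=64, output_type="pil").images[0]
    #     export_to_gif(frames, "shark_3d.gif")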
| 463 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput ):
    latents: torch.FloatTensor
class VQModel(ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 3 , sample_size = 32 , num_vq_embeddings = 256 , norm_num_groups = 32 , vq_embed_dim = None , scaling_factor = 0.18215 , norm_type = "group" , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
    @apply_forward_hook
    def encode(self , x , return_dict = True ):
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
    @apply_forward_hook
    def decode(self , h , force_not_quantize = False , return_dict = True ):
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant = self.post_quant_conv(quant )
        dec = self.decoder(quant , quant if self.config.norm_type == "spatial" else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward(self , sample , return_dict = True ):
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
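# Usage sketch for the reconstructed VQModel above (diffusers-style API assumed):
#     model = VQModel(block_out_channels=(32,), norm_num_groups=32, latent_channels=3)
#     x = torch.randn(1, 3, 32, 32)
#     latents = model.encode(x).latents        # continuous pre-quantization latents
#     recon = model.decode(latents).sample     # quantize, then decode back to pixel space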
| 238 |
'''simple docstring'''
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name , config_path , checkpoint_path , model_dump_path ):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path )
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForAudioFrameClassification" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForXVector" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
UpperCamelCase__ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
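# Example invocation (paths are placeholders):
#     python convert_s3prl_checkpoint.py \
#         --base_model_name facebook/wav2vec2-base --config_path ./config.json \
#         --checkpoint_path ./s3prl_checkpoint.ckpt --model_dump_path ./converted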
| 620 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig ):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__( self , vocab_size=50_257 , max_position_embeddings=20_48 , hidden_size=20_48 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=2_56 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                F'''but is `len(config.attention_layers) = {len(self.attention_layers)}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.')
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types ):
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold(input , dimension , size , step ):
    """Custom torch.Tensor.unfold implementation to enable ONNX export."""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='''floor''' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks(seq_length , window_size ):
    """Returns the largest divisor of seq_length below window_size and the resulting block count."""
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='''floor''' )
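# Worked example: for seq_length=8 and window_size=4 the candidates are [1, 2, 3],
# the remainders of 8 are [0, 0, 2], so the divisors are [1, 2]; the helper returns
# (2, 4): a block length of 2 and 8 // 2 = 4 blocks.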
class _UpperCAmelCase( lowerCamelCase ):
@property
def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
_UpperCamelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}})
if self.use_past:
self.fill_with_past_key_values_(__a , direction='''inputs''')
_UpperCamelCase = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self._config.num_heads
def UpperCAmelCase ( self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ) -> Mapping[str, Any]:
'''simple docstring'''
_UpperCamelCase = super(__a , self).generate_dummy_inputs(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a)
# We need to order the input in the way they appears in the forward()
_UpperCamelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
else:
import torch
_UpperCamelCase , _UpperCamelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_UpperCamelCase = seqlen + 2
_UpperCamelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_UpperCamelCase = [
(torch.zeros(__a), torch.zeros(__a)) for _ in range(self.num_layers)
]
_UpperCamelCase = common_inputs['''attention_mask''']
if self.use_past:
_UpperCamelCase = ordered_inputs['''attention_mask'''].dtype
_UpperCamelCase = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__a , __a , dtype=__a)] , dim=1)
return ordered_inputs
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 13
| 78 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_a = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = field(default=lowerCamelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowercase__ = field(
default=lowerCamelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowercase__ = field(
default=lowerCamelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowercase__ = field(
default=lowerCamelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
    def to_dict(self ):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
| 78 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self , m , inputs , outputs ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__(self , x ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized(self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self , x ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda t : type(t ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda t : type(t ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                F'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                F''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F'''Transfered from={src_m} to={dest_m}''' )
def convert_weight_and_push(name , config , save_directory , push_to_hub = True ):
    print(f'''Converting {name}...''' )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = f'''resnet{"-".join(name.split("resnet" ) )}'''
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=True , )
        print(f'''Pushed {checkpoint_name}''' )
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True ):
lowercase__ = "imagenet-1k-id2label.json"
lowercase__ = 1000
lowercase__ = (1, num_labels)
lowercase__ = "huggingface/label-files"
lowercase__ = num_labels
lowercase__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="dataset" ) , "r" ) )
lowercase__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = partial(__UpperCamelCase , num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
lowercase__ = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(__UpperCamelCase , names_to_config[model_name] , __UpperCamelCase , __UpperCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, expected_shape
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
lowercase_ = parser.parse_args()
lowercase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
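    # Example invocation (hypothetical script name; paths are placeholders):
    #     python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted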
| 413 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self , start , end , val , left=None , right=None ):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
    def __repr__(self ):
        return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class SegmentTree:
    def __init__(self , collection , function ):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(self.collection ) - 1 )
    def update(self , i , val ):
        self._update_tree(self.root , i , val )
    def query_range(self , i , j ):
        return self._query_range(self.root , i , j )
    def _build_tree(self , start , end ):
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )
    def _update_tree(self , node , i , val ):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        node.val = self.fn(node.left.val , node.right.val )
    def _query_range(self , node , i , j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j )
    def traverse(self ):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
__lowerCamelCase = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 490 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a , model_b , did_step , iteration ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() ,model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model , input , target , accelerator , do_backward=True ):
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup(accelerator , sched=False ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
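# For reference, the user-facing gradient-accumulation pattern these helpers exercise
# (standard accelerate API; `compute_loss` is a hypothetical helper):
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         with accelerator.accumulate(model):
#             loss = compute_loss(model, batch)
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()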
def test_noop_sync(accelerator ):
    '''simple docstring'''
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ) , target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def UpperCamelCase ( snake_case__ : Optional[Any] ):
'''simple docstring'''
__snake_case :int = get_training_setup(lowerCAmelCase_ )
# Use a single batch
__snake_case :int = next(iter(lowerCAmelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__snake_case :Optional[int] = accelerator.gather((ddp_input, ddp_target) )
__snake_case :Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
else:
# Sync grads
step_model(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad ,ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__snake_case :List[Any] = ddp_input[torch.randperm(len(lowerCAmelCase_ ) )]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
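
# Hedged usage note (not part of the original script): this file is meant to be
# launched through the `accelerate` CLI so that `Accelerator()` picks up the
# distributed configuration; the file name `test_sync.py` is an assumption:
#
#   accelerate launch --num_processes 2 test_sync.py
#
# Plain `python test_sync.py` exercises only the DistributedType.NO branches.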
| 703 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase__ = """pytorch_model.bin"""
lowerCamelCase__ = """pytorch_model.bin.index.json"""
lowerCamelCase__ = """adapter_config.json"""
lowerCamelCase__ = """adapter_model.bin"""
lowerCamelCase__ = """adapter_model.safetensors"""
lowerCamelCase__ = """tf_model.h5"""
lowerCamelCase__ = """tf_model.h5.index.json"""
lowerCamelCase__ = """model.ckpt"""
lowerCamelCase__ = """flax_model.msgpack"""
lowerCamelCase__ = """flax_model.msgpack.index.json"""
lowerCamelCase__ = """model.safetensors"""
lowerCamelCase__ = """model.safetensors.index.json"""
lowerCamelCase__ = """config.json"""
lowerCamelCase__ = """preprocessor_config.json"""
lowerCamelCase__ = FEATURE_EXTRACTOR_NAME
lowerCamelCase__ = """generation_config.json"""
lowerCamelCase__ = """modelcard.json"""
lowerCamelCase__ = """▁"""
lowerCamelCase__ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase__ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase__ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase__ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 291 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
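
# Hedged usage sketch (not part of the original module): selecting a backend
# explicitly instead of relying on `default_hp_search_backend()`; the `trainer`
# object and the search arguments are illustrative assumptions.
#
#   backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend("optuna")]
#   backend = backend_cls()
#   backend.ensure_available()
#   best_run = backend.run(trainer, n_trials=20, direction="minimize")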
| 26 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
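
# Hedged usage sketch (not part of the original script): the same conversion,
# invoked directly instead of through argparse; the paths are placeholders.
#
#   convert_clap_checkpoint(
#       checkpoint_path="CLAP/checkpoint.pt",
#       pytorch_dump_folder_path="./clap-converted",
#       config_path=None,
#       enable_fusion=False,
#   )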
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 498 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Adjacency list for a graph whose edge weights are all 0 or 1."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-1 BFS: zero-weight edges go to the front of the deque,
                # one-weight edges to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
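
    # Hedged demo (not part of the original module): with 0-1 BFS the all-zero
    # chain 0 -> 1 -> 2 -> 3 costs 0 and beats the direct weight-1 edge 0 -> 3.
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 0)
    g.add_edge(2, 3, 0)
    g.add_edge(0, 3, 1)
    assert g.get_shortest_path(0, 3) == 0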
| 581 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
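
# Hedged usage sketch (not part of the original module): composing a config
# from explicit sub-configs; the overridden sizes are arbitrary illustration
# values.
#
#   text_cfg = Pix2StructTextConfig(num_layers=2, num_heads=4)
#   vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2)
#   config = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert config.text_config.num_layers == 2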
| 581 | 1 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 331 |
"""simple docstring"""
import re
def get_complementary_strand(dna: str) -> str:
    """Return the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
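
    # Hedged example (not part of the original module): each base maps to its
    # complement in the same orientation (this is not the reverse complement).
    assert get_complementary_strand("ATCG") == "TAGC"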
| 607 | 0 |
def base16_encode(data: bytes) -> str:
    # Turn every byte into its two-digit uppercase hexadecimal representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
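
    # Hedged round-trip example (not part of the original module):
    encoded = base16_encode(b"Hello")
    assert encoded == "48656C6C6F"
    assert base16_decode(encoded) == b"Hello"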
| 627 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
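
# Hedged note (not part of the original module): with `_LazyModule`, importing
# this package does not eagerly import torch or the model code; an import such
# as `from <this package> import IBertConfig` resolves the attribute lazily on
# first access. The exact package path is an assumption here.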
| 627 | 1 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend('    if not is_torch_available():')
        self.assertEqual(simple_backend, 'torch')
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend('    if not (is_torch_available() and is_transformers_available()):')
        self.assertEqual(double_backend, 'torch_and_transformers')
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            '    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):')
        self.assertEqual(triple_backend, 'torch_and_transformers_and_onnx')

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('torch_and_transformers', objects)
        self.assertIn('flax_and_transformers', objects)
        self.assertIn('torch_and_transformers_and_onnx', objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('UNet2DModel', objects['torch'])
        self.assertIn('FlaxUNet2DConditionModel', objects['flax'])
        self.assertIn('StableDiffusionPipeline', objects['torch_and_transformers'])
        self.assertIn('FlaxStableDiffusionPipeline', objects['flax_and_transformers'])
        self.assertIn('LMSDiscreteScheduler', objects['torch_and_scipy'])
        self.assertIn('OnnxStableDiffusionPipeline', objects['torch_and_transformers_and_onnx'])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', '\'torch\'')
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')

        dummy_function = create_dummy_object('function', '\'torch\'')
        self.assertEqual(
            dummy_function, '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n')

        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass', '\'torch\'')
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
| 596 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
        't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
        't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
        't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
        't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
        't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
        't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
        't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
        't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    't5-small': 512,
    't5-base': 512,
    't5-large': 512,
    't5-3b': 512,
    't5-11b': 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token='</s>',
        unk_token='<unk>',
        pad_token='<pad>',
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id_' in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens')

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.',
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f'Copy vocab file to {out_vocab_file}')
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: re.search(r'<extra_id_\d+>', x) is not None, self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
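
# Hedged usage sketch (not part of the original module; assumes network access
# to the public `t5-small` checkpoint):
#
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   ids = tok("translate English to German: hello").input_ids
#   assert ids[-1] == tok.eos_token_id  # every sequence is closed with </s>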
| 596 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        "[...]"  # elided in the source, kept as-is
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        "[...]"  # elided in the source, kept as-is
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model which is supposed to download a model checkpoint
    # mock load_from_checkpoint which is supposed to load that checkpoint
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 712 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # Only build a default scheduler when none was passed in.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
"""simple docstring"""
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , algorithm_type="""deis""" , solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , prediction_type=lowerCAmelCase , algorithm_type=lowerCAmelCase , )
__lowerCAmelCase : str = self.full_loop(
solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , prediction_type=lowerCAmelCase , algorithm_type=lowerCAmelCase , )
assert not torch.isnan(lowerCAmelCase ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
"""simple docstring"""
self.check_over_configs(lower_order_final=lowerCAmelCase )
self.check_over_configs(lower_order_final=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=lowerCAmelCase , time_step=0 )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Any = self.full_loop()
__lowerCAmelCase : List[str] = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.full_loop(prediction_type="""v_prediction""" )
__lowerCAmelCase : List[str] = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.scheduler_classes[0]
__lowerCAmelCase : Optional[int] = self.get_scheduler_config(thresholding=lowerCAmelCase , dynamic_thresholding_ratio=0 )
__lowerCAmelCase : Optional[int] = scheduler_class(**lowerCAmelCase )
__lowerCAmelCase : Tuple = 10
__lowerCAmelCase : int = self.dummy_model()
__lowerCAmelCase : List[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase : Optional[Any] = model(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[str] = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
| 218 | 0 |
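The tests above round-trip a scheduler config through `save_config`/`from_pretrained` and assert bit-identical `step` outputs. A minimal standalone sketch of that invariant (assuming `diffusers` and `torch` are installed; `DEISMultistepScheduler` is one of the classes the surrounding tests exercise):

import tempfile

import torch
from diffusers import DEISMultistepScheduler

# Save the config to disk and reload a second scheduler from it.
scheduler = DEISMultistepScheduler()
with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    reloaded = DEISMultistepScheduler.from_pretrained(tmpdir)

# Step both schedulers on the same dummy residual and sample.
scheduler.set_timesteps(10)
reloaded.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
residual = 0.1 * sample  # stand-in for a denoising model's output
t = scheduler.timesteps[0]
out_a = scheduler.step(residual, t, sample).prev_sample
out_b = reloaded.step(residual, t, sample).prev_sample
assert torch.sum(torch.abs(out_a - out_b)) < 1e-5, "Scheduler outputs are not identical"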
import math
def res ( x , y ):
    if 0 not in (x, y):
        # We use the identity log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen' )
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    xa , ya = map(int, input(prompt).split(","))
    xb , yb = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print("Largest number is", xa, "^", ya)
    elif resb > resa:
        print("Largest number is", xb, "^", yb)
    else:
        print("Both are equal")
| 622 |
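Since the script above only needs to compare magnitudes, the powers themselves never have to be computed: `log10(x^y) = y * log10(x)` turns a comparison of two huge integers into a comparison of two small floats. A self-contained sketch of the trick:

import math

def log_power(base: int, exp: int) -> float:
    # log10(base ** exp) == exp * log10(base), valid for base > 0
    return exp * math.log10(base)

# Decide whether 2**100 or 10**30 is larger without materialising either number.
assert log_power(2, 100) > log_power(10, 30)  # 2**100 ~ 1.27e30 beats 10**30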
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Dict = {"vocab_file": "spiece.model"}
a__ : str = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
a__ : Union[str, Any] = {
"albert-base-v1": 5_12,
"albert-large-v1": 5_12,
"albert-xlarge-v1": 5_12,
"albert-xxlarge-v1": 5_12,
"albert-base-v2": 5_12,
"albert-large-v2": 5_12,
"albert-xlarge-v2": 5_12,
"albert-xxlarge-v2": 5_12,
}
a__ : Optional[int] = "▁"
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Tuple = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : str=False , lowerCAmelCase : str="[CLS]" , lowerCAmelCase : Optional[int]="[SEP]" , lowerCAmelCase : int="<unk>" , lowerCAmelCase : str="[SEP]" , lowerCAmelCase : Dict="<pad>" , lowerCAmelCase : int="[CLS]" , lowerCAmelCase : Tuple="[MASK]" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[str] , ) -> None:
"""simple docstring"""
lowercase__ = (
AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase , normalized=lowerCAmelCase)
if isinstance(lowerCAmelCase , lowerCAmelCase)
else mask_token
)
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCAmelCase , remove_space=lowerCAmelCase , keep_accents=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
lowercase__ = do_lower_case
lowercase__ = remove_space
lowercase__ = keep_accents
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCAmelCase)
@property
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
return len(self.sp_model)
def UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(lowerCAmelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : int) -> List[str]:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self : int , lowerCAmelCase : str) -> Any:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
if self.remove_space:
lowercase__ = ' '.join(inputs.strip().split())
else:
lowercase__ = inputs
lowercase__ = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
lowercase__ = unicodedata.normalize('NFKD' , lowerCAmelCase)
lowercase__ = ''.join([c for c in outputs if not unicodedata.combining(lowerCAmelCase)])
if self.do_lower_case:
lowercase__ = outputs.lower()
return outputs
def UpperCAmelCase ( self : Any , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
lowercase__ = self.preprocess_text(lowerCAmelCase)
lowercase__ = self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase)
lowercase__ = []
for piece in pieces:
if len(lowerCAmelCase) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
lowercase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
lowercase__ = cur_pieces[1:]
else:
lowercase__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCAmelCase)
else:
new_pieces.append(lowerCAmelCase)
return new_pieces
def UpperCAmelCase ( self : Any , lowerCAmelCase : int) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
return self.sp_model.IdToPiece(lowerCAmelCase)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = []
lowercase__ = ''
lowercase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase) + token
lowercase__ = True
lowercase__ = []
else:
current_sub_tokens.append(lowerCAmelCase)
lowercase__ = False
out_string += self.sp_model.decode(lowerCAmelCase)
return out_string.strip()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase ( self : int , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase)) + [1] + ([0] * len(lowerCAmelCase)) + [1]
return [1] + ([0] * len(lowerCAmelCase)) + [1]
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCAmelCase)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase , 'wb') as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase)
return (out_vocab_file,)
| 622 | 1 |
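The sequence builders near the end encode ALBERT's layout: `[CLS] A [SEP]` for a single sequence, `[CLS] A [SEP] B [SEP]` for a pair, with token type ids 0 for the first segment and 1 for the second. A dependency-free sketch of that layout (the special-token ids below are placeholders for illustration, not ALBERT's real ids):

from typing import List, Optional

CLS_ID, SEP_ID = 2, 3  # placeholder ids, for illustration only

def build_inputs(ids_a: List[int], ids_b: Optional[List[int]] = None) -> List[int]:
    # single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP]
    out = [CLS_ID] + ids_a + [SEP_ID]
    if ids_b is not None:
        out += ids_b + [SEP_ID]
    return out

def token_type_ids(ids_a: List[int], ids_b: Optional[List[int]] = None) -> List[int]:
    # segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP]
    first = [0] * (len(ids_a) + 2)
    return first if ids_b is None else first + [1] * (len(ids_b) + 1)

assert build_inputs([10, 11], [20]) == [CLS_ID, 10, 11, SEP_ID, 20, SEP_ID]
assert token_type_ids([10, 11], [20]) == [0, 0, 0, 0, 1, 1]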
from typing import Union
import fire
import torch
from tqdm import tqdm
def UpperCAmelCase__ ( _A , _A = "cpu" , _A = None ):
"""simple docstring"""
a_ = torch.load(_A , map_location=_A )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_A , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
a_ = v.half()
if save_path is None: # overwrite src_path
a_ = src_path
torch.save(_A , _A )
if __name__ == "__main__":
fire.Fire(convert)
| 708 |
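The converter above halves every tensor in a saved checkpoint. A small sketch of the core transform; note it deviates from the script in one respect (flagged in the comment): it leaves non-floating tensors alone, since `.half()` would silently turn integer buffers into float16:

import torch

def state_dict_to_fp16(state_dict: dict) -> dict:
    out = {}
    for name, tensor in state_dict.items():
        if not isinstance(tensor, torch.Tensor):
            raise TypeError(f"expected a tensor for {name!r}, got {type(tensor)}")
        # Variation on the script above: only floating tensors are halved,
        # so integer buffers (e.g. position ids) keep their dtype.
        out[name] = tensor.half() if tensor.is_floating_point() else tensor
    return out

sd = {"weight": torch.randn(2, 2), "ids": torch.arange(4)}
halved = state_dict_to_fp16(sd)
assert halved["weight"].dtype == torch.float16 and halved["ids"].dtype == torch.int64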
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 143 | 0 |
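The `_LazyModule` indirection above keeps the package import cheap: submodules load only when an attribute is first touched. A stdlib-only sketch of the same idea using PEP 562's module-level `__getattr__` (a simplified stand-in for `_LazyModule`, meant to live in a package's `__init__.py`):

import importlib

# public name -> relative submodule that actually defines it
_IMPORT_STRUCTURE = {
    "RobertaPreLayerNormConfig": ".configuration_roberta_prelayernorm",
}

def __getattr__(name: str):
    # Invoked only for names not found normally, so the submodule is
    # imported on first access rather than at package import time.
    if name in _IMPORT_STRUCTURE:
        module = importlib.import_module(_IMPORT_STRUCTURE[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")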
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Any = logging.get_logger(__name__)
def lowerCamelCase__ ( __lowercase ):
snake_case : str = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
snake_case : str = 192
snake_case : Union[str, Any] = 768
snake_case : List[Any] = 12
snake_case : Any = 3
snake_case : Union[str, Any] = [800, 1_333]
snake_case : Optional[Any] = False
elif yolos_name == "yolos_s_dWr":
snake_case : str = 330
snake_case : List[Any] = 14
snake_case : List[Any] = 6
snake_case : Union[str, Any] = 1_320
elif "yolos_s" in yolos_name:
snake_case : int = 384
snake_case : Any = 1_536
snake_case : str = 12
snake_case : Any = 6
elif "yolos_b" in yolos_name:
snake_case : List[Any] = [800, 1_344]
snake_case : Optional[Any] = 91
snake_case : List[str] = """huggingface/label-files"""
snake_case : List[Any] = """coco-detection-id2label.json"""
snake_case : Union[str, Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="""dataset""" ) , """r""" ) )
snake_case : Union[str, Any] = {int(__lowercase ): v for k, v in idalabel.items()}
snake_case : int = idalabel
snake_case : Tuple = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( __lowercase , __lowercase , __lowercase = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case : Optional[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
snake_case : Optional[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case : Tuple = in_proj_weight[: config.hidden_size, :]
snake_case : List[Any] = in_proj_bias[: config.hidden_size]
snake_case : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case : str = in_proj_weight[-config.hidden_size :, :]
snake_case : List[str] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( __lowercase ):
if "backbone" in name:
snake_case : List[Any] = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
snake_case : Any = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
snake_case : Any = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
snake_case : List[Any] = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
snake_case : Dict = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
snake_case : Optional[int] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
snake_case : Optional[int] = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
snake_case : Tuple = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
snake_case : Tuple = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
snake_case : Dict = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case : Tuple = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case : Dict = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case : Tuple = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
snake_case : List[str] = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
snake_case : Union[str, Any] = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
snake_case : int = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def lowerCamelCase__ ( __lowercase , __lowercase ):
for key in orig_state_dict.copy().keys():
snake_case : str = orig_state_dict.pop(__lowercase )
if "qkv" in key:
snake_case : str = key.split(""".""" )
snake_case : str = int(key_split[2] )
snake_case : str = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
snake_case : Any = val[:dim, :]
snake_case : Optional[int] = val[
dim : dim * 2, :
]
snake_case : Union[str, Any] = val[-dim:, :]
else:
snake_case : Dict = val[:dim]
snake_case : List[str] = val[dim : dim * 2]
snake_case : Union[str, Any] = val[-dim:]
else:
snake_case : Union[str, Any] = val
return orig_state_dict
def lowerCamelCase__ ( ):
snake_case : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case : Tuple = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ):
snake_case : int = get_yolos_config(__lowercase )
# load original state_dict
snake_case : Dict = torch.load(__lowercase , map_location="""cpu""" )["""model"""]
# load 🤗 model
snake_case : Optional[int] = YolosForObjectDetection(__lowercase )
model.eval()
snake_case : str = convert_state_dict(__lowercase , __lowercase )
model.load_state_dict(__lowercase )
# Check outputs on an image, prepared by YolosImageProcessor
snake_case : Tuple = 800 if yolos_name != """yolos_ti""" else 512
snake_case : Any = YolosImageProcessor(format="""coco_detection""" , size=__lowercase )
snake_case : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
snake_case : str = model(**__lowercase )
snake_case , snake_case : Optional[Any] = outputs.logits, outputs.pred_boxes
snake_case , snake_case : str = None, None
if yolos_name == "yolos_ti":
snake_case : List[str] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
snake_case : Union[str, Any] = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
snake_case : Tuple = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
snake_case : List[str] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
snake_case : Optional[Any] = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
snake_case : Optional[int] = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
snake_case : List[Any] = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
snake_case : str = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
snake_case : Optional[Any] = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
snake_case : Any = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __lowercase , atol=1e-4 )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowercase )
if push_to_hub:
snake_case : Optional[Any] = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
snake_case : Union[str, Any] = model_mapping[yolos_name]
image_processor.push_to_hub(__lowercase , organization="""hustvl""" )
model.push_to_hub(__lowercase , organization="""hustvl""" )
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase : Tuple = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 116 |
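The conversion's subtlest step is the second helper above (the one popping the `attn.qkv` keys): timm stores attention as one fused input projection of shape `(3 * hidden, hidden)`, which must be sliced row-wise into separate query/key/value weights. A toy torch-only check that the slicing is equivalent (the hidden size here is illustrative):

import torch

hidden = 4  # toy hidden size
qkv_weight = torch.randn(3 * hidden, hidden)  # rows pack q, then k, then v
qkv_bias = torch.randn(3 * hidden)

# Slice the fused projection into the three separate ones.
q_w, k_w, v_w = qkv_weight[:hidden], qkv_weight[hidden : 2 * hidden], qkv_weight[2 * hidden :]
q_b, k_b, v_b = qkv_bias[:hidden], qkv_bias[hidden : 2 * hidden], qkv_bias[2 * hidden :]

x = torch.randn(5, hidden)
fused = x @ qkv_weight.T + qkv_bias  # one matmul, concatenated outputs
split = torch.cat([x @ q_w.T + q_b, x @ k_w.T + k_b, x @ v_w.T + v_b], dim=-1)
assert torch.allclose(fused, split, atol=1e-5)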
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class _a :
'''simple docstring'''
def __init__( self ,__a ,__a ,__a = True ,__a = False ) -> Dict:
snake_case : List[Any] = scheduler
snake_case : List[Any] = optimizers if isinstance(__a ,(list, tuple) ) else [optimizers]
snake_case : List[str] = split_batches
snake_case : List[str] = step_with_optimizer
snake_case : int = GradientState()
def snake_case_ ( self ,*__a ,**__a ) -> Optional[Any]:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__a ,**__a )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__a ,**__a )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
snake_case : List[str] = AcceleratorState().num_processes
for _ in range(__a ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler ,"""total_steps""" ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__a ,**__a )
else:
self.scheduler.step(*__a ,**__a )
def snake_case_ ( self ) -> Dict:
return self.scheduler.get_last_lr()
def snake_case_ ( self ) -> int:
return self.scheduler.state_dict()
def snake_case_ ( self ,__a ) -> Any:
self.scheduler.load_state_dict(__a )
def snake_case_ ( self ) -> Any:
return self.scheduler.get_lr()
def snake_case_ ( self ,*__a ,**__a ) -> Optional[Any]:
return self.scheduler.print_lr(*__a ,**__a )
| 116 | 1 |
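The `else` branch above steps the wrapped scheduler `num_processes` times because, without split batches, each training step consumes one batch per process while the LR schedule is defined in batches. A toy pure-PyTorch sketch of that bookkeeping (the worker count is made up for illustration):

import torch

model = torch.nn.Linear(2, 2)
opt = torch.optim.SGD(model.parameters(), lr=1.0)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=4, gamma=0.1)

num_processes = 2  # pretend two workers each consumed a batch per step
for _ in range(2):  # two optimizer steps as seen by one worker
    opt.step()
    for _ in range(num_processes):  # advance the schedule once per worker
        sched.step()

# 2 steps * 2 processes = 4 scheduler ticks, so StepLR has decayed once.
assert abs(sched.get_last_lr()[0] - 0.1) < 1e-12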
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _SCREAMING_SNAKE_CASE ( ):
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case_ , default=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=snake_case_ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=snake_case_ , default=4 , help='''How much images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=snake_case_ , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=snake_case_ , default=0 , help='''cuda_id.''' , )
__magic_name__ = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : int ):
if not len(snake_case_ ) == rows * cols:
raise ValueError('''The specified number of rows and columns are not correct.''' )
__magic_name__ , __magic_name__ = imgs[0].size
__magic_name__ = Image.new('''RGB''' , size=(cols * w, rows * h) )
__magic_name__ , __magic_name__ = grid.size
for i, img in enumerate(snake_case_ ):
grid.paste(snake_case_ , box=(i % cols * w, i // cols * h) )
return grid
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str="robotic cat with wings" , snake_case_ : Optional[Any]=7.5 , snake_case_ : Dict=50 , snake_case_ : Optional[Any]=1 , snake_case_ : Optional[Any]=42 , ):
__magic_name__ = torch.Generator(pipeline.device ).manual_seed(snake_case_ )
__magic_name__ = pipeline(
snake_case_ , guidance_scale=snake_case_ , num_inference_steps=snake_case_ , generator=snake_case_ , num_images_per_prompt=snake_case_ , ).images
__magic_name__ = int(math.sqrt(snake_case_ ) )
__magic_name__ = image_grid(snake_case_ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
a_ : Dict = parse_args()
# Load models and create wrapper for stable diffusion
a_ : Union[str, Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
a_ : Optional[int] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
a_ : str = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
a_ : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
a_ : List[str] = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
a_ : Union[str, Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
a_ : Dict = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
a_ : Optional[int] = unet.to(torch.device('cuda', args.cuda_id))
a_ : Optional[int] = pipeline.to(unet.device)
a_ : Optional[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
a_ : str = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 712 |
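The grid helper at the top of the script is the piece most worth lifting out: it pastes `rows * cols` images row-major onto one sheet. A self-contained Pillow sketch, exercised with solid-colour stand-ins instead of generated images:

from PIL import Image

def image_grid(imgs, rows, cols):
    if len(imgs) != rows * cols:
        raise ValueError("rows * cols must equal the number of images")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        # fill row-major: column = i % cols, row = i // cols
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid

tiles = [Image.new("RGB", (32, 32), color) for color in ("red", "green", "blue", "white")]
sheet = image_grid(tiles, rows=2, cols=2)
assert sheet.size == (64, 64)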
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : str = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Optional[Any] = _get_torch_home()
except ImportError:
a_ : List[Any] = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
a_ : Any = os.path.join(torch_cache_home, 'transformers')
a_ : Any = 'https://cdn.huggingface.co'
a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
a_ : Any = os.path.join(PATH, 'config.yaml')
a_ : Any = os.path.join(PATH, 'attributes.txt')
a_ : Any = os.path.join(PATH, 'objects.txt')
a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
a_ : int = 'pytorch_model.bin'
a_ : Union[str, Any] = 'config.yaml'
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ):
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__magic_name__ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ):
__magic_name__ = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__magic_name__ = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__magic_name__ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__magic_name__ = torch.tensor(snake_case_ )
else:
assert isinstance(snake_case_ , torch.tensor ), type(snake_case_ )
__magic_name__ = v
return r
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
_a = {}
def __init__( self , A , A = "root" , A=0 ) -> List[str]:
'''simple docstring'''
__magic_name__ = name
__magic_name__ = level
__magic_name__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__magic_name__ = copy.deepcopy(A )
__magic_name__ = copy.deepcopy(A )
if isinstance(A , A ):
__magic_name__ = Config(A , name=A , level=level + 1 )
__magic_name__ = v
setattr(self , A , A )
__magic_name__ = d
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , A , A ) -> Tuple:
'''simple docstring'''
__magic_name__ = val
__magic_name__ = val
__magic_name__ = key.split('''.''' )
__magic_name__ = len(A ) - 1
__magic_name__ = self._pointer
if len(A ) > 1:
for i, l in enumerate(A ):
if hasattr(self , A ) and isinstance(getattr(self , A ) , A ):
setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A )
if l == last_level:
__magic_name__ = val
else:
__magic_name__ = pointer[l]
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self._pointer
def __A ( self , A , A ) -> Any:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
dump(A , A )
def __A ( self , A , A ) -> List[Any]:
'''simple docstring'''
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(A , A )
@staticmethod
def __A ( A ) -> Optional[Any]:
'''simple docstring'''
with open(A ) as stream:
__magic_name__ = load(A , Loader=A )
return data
def __str__( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = ''' '''
if self._name != "root":
__magic_name__ = F'{t * (self._level-1)}{self._name}:\n'
else:
__magic_name__ = ''''''
__magic_name__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(A , A ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n'
__magic_name__ = level
return r[:-1]
@classmethod
def __A ( cls , A , **A ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ = cls.get_config_dict(A , **A )
return cls(A )
@classmethod
def __A ( cls , A , **A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = kwargs.pop('''cache_dir''' , A )
__magic_name__ = kwargs.pop('''force_download''' , A )
__magic_name__ = kwargs.pop('''resume_download''' , A )
__magic_name__ = kwargs.pop('''proxies''' , A )
__magic_name__ = kwargs.pop('''local_files_only''' , A )
if os.path.isdir(A ):
__magic_name__ = os.path.join(A , A )
elif os.path.isfile(A ) or is_remote_url(A ):
__magic_name__ = pretrained_model_name_or_path
else:
__magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A )
try:
# Load from URL or cache if already cached
__magic_name__ = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__magic_name__ = Config.load_yaml(A )
except EnvironmentError:
__magic_name__ = '''Can\'t load config for'''
raise EnvironmentError(A )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(A ), kwargs
def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
__magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device )
__magic_name__ = in_tensor.numpy()
__magic_name__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
__magic_name__ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ):
__magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__magic_name__ = '''/''' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ):
__magic_name__ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__magic_name__ = {'''user-agent''': ua}
if resume_size > 0:
__magic_name__ = '''bytes=%d-''' % (resume_size,)
__magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__magic_name__ = response.headers.get('''Content-Length''' )
__magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None
__magic_name__ = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__magic_name__ = None
if not local_files_only:
try:
__magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__magic_name__ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__magic_name__ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__magic_name__ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__magic_name__ = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__magic_name__ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__magic_name__ = _resumable_file_manager
if os.path.exists(snake_case_ ):
__magic_name__ = os.stat(snake_case_ ).st_size
else:
__magic_name__ = 0
else:
__magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__magic_name__ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__magic_name__ = {'''url''': url, '''etag''': etag}
__magic_name__ = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ):
__magic_name__ = url.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
__magic_name__ = url_hash.hexdigest()
if etag:
__magic_name__ = etag.encode('''utf-8''' )
__magic_name__ = shaaaa(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ):
if cache_dir is None:
__magic_name__ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__magic_name__ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__magic_name__ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__magic_name__ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__magic_name__ , __magic_name__ = os.path.split(snake_case_ )
__magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__magic_name__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__magic_name__ = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__magic_name__ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(snake_case_ )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data )
        except Exception:
            data = data.split('''\n''' )
        req.close()
return data
def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ):
__magic_name__ = requests.get(snake_case_ )
__magic_name__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
__magic_name__ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__magic_name__ = pkl.load(snake_case_ )
__magic_name__ = weights.pop('''model''' )
__magic_name__ = {}
for k, v in model.items():
__magic_name__ = torch.from_numpy(snake_case_ )
if "running_var" in k:
__magic_name__ = torch.tensor([0] )
__magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' )
__magic_name__ = zero
return new
def _SCREAMING_SNAKE_CASE ( ):
print(f'{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb' )
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__magic_name__ = cva.imread(snake_case_ )
else:
__magic_name__ = get_image_from_url(snake_case_ )
assert img is not None, f'could not connect to: {im}'
__magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__magic_name__ = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ):
    return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ ))
| 678 | 0 |
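Two small pieces of the caching machinery above are worth isolating: cache filenames are content-addressed hashes of the URL (plus the ETag, so a changed upstream file gets a fresh entry), and the final helper batches a list into fixed-size chunks. A stdlib sketch of both:

from hashlib import sha256
from typing import Optional

def url_to_filename(url: str, etag: Optional[str] = None) -> str:
    # sha256(url), optionally suffixed with sha256(etag) for cache busting.
    name = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        name += "." + sha256(etag.encode("utf-8")).hexdigest()
    return name

def chunks(items, batch=1):
    # Yield successive slices of size `batch`; the last may be shorter.
    return (items[i : i + batch] for i in range(0, len(items), batch))

assert len(url_to_filename("https://example.com/model.bin")) == 64
assert list(chunks([1, 2, 3, 4, 5], batch=2)) == [[1, 2], [3, 4], [5]]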
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : Optional[Any] = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38 |
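Both lazy `__init__.py` files guard backend-specific exports so that importing the package never hard-fails when torch, TF, or Flax is missing. A minimal stdlib sketch of such an availability check (an illustrative reduction, not the transformers helpers):

import importlib.util

def is_available(package: str) -> bool:
    # True when `package` is importable, without actually importing it.
    return importlib.util.find_spec(package) is not None

# Backend-only classes simply drop out of the public API when torch is absent.
_public_api = ["VisionEncoderDecoderConfig"]
if is_available("torch"):
    _public_api.append("VisionEncoderDecoderModel")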
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[Any] ) ->Any:
snake_case_ = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
snake_case_ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
snake_case_ = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
snake_case_ = tf_top_k_top_p_filtering(_UpperCamelCase , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
snake_case_ = output[output != -float('''inf''' )]
snake_case_ = tf.cast(
tf.where(tf.not_equal(_UpperCamelCase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_UpperCamelCase , _UpperCamelCase , rtol=1e-12 )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
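# The test above pins down combined top-k / top-p (nucleus) filtering. A
# minimal NumPy sketch of the same filtering rule (an illustrative
# re-implementation, not the TF helper under test, and without its
# min_tokens_to_keep handling):
import numpy as np

def top_k_top_p_filter(logits, top_k=0, top_p=1.0, filter_value=-np.inf):
    logits = logits.astype(float)
    if top_k > 0:
        # keep only the top_k largest logits
        kth_largest = np.sort(logits)[-top_k]
        logits[logits < kth_largest] = filter_value
    if top_p < 1.0:
        order = np.argsort(logits)[::-1]  # indices, most likely first
        shifted = logits[order] - logits[order][0]
        probs = np.exp(shifted) / np.exp(shifted).sum()
        cum = np.cumsum(probs)
        # drop a token once the cumulative probability *before* it already
        # exceeds top_p, so the token that crosses the threshold is kept
        logits[order[cum - probs > top_p]] = filter_value
    return logits

filtered = top_k_top_p_filter(np.array([1.0, 3.0, 2.0, -1.0]), top_k=2)
assert np.isinf(filtered[[0, 3]]).all() and np.isfinite(filtered[[1, 2]]).all()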
@require_tf
class snake_case_ ( unittest.TestCase , __A ):
'''simple docstring'''
if is_tf_available():
SCREAMING_SNAKE_CASE : Optional[int] = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def snake_case__( self : List[Any] ) ->Optional[int]:
# TF-only test: tf.saved_model export
snake_case_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = 2
snake_case_ = 2
class snake_case_ ( tf.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : Optional[int] ) ->List[Any]:
super(_UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_UpperCamelCase , )
def snake_case__( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) ->List[Any]:
snake_case_ = self.model.generate(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , max_new_tokens=_UpperCamelCase , return_dict_in_generate=_UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2, 0], [1_0_2, 1_0_3]]
snake_case_ = [[1, 0], [1, 1]]
snake_case_ = DummyModel(model=_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} )
snake_case_ = tf.saved_model.load(_UpperCamelCase ).signatures['''serving_default''']
for batch_size in range(1 , len(_UpperCamelCase ) + 1 ):
snake_case_ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
snake_case_ = serving_func(**_UpperCamelCase )['''sequences''']
snake_case_ = test_model.generate(**_UpperCamelCase , max_new_tokens=_UpperCamelCase )
tf.debugging.assert_equal(_UpperCamelCase , _UpperCamelCase )
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]

            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model with a different signature: it additionally accepts "foo".
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar") | 39 | 0 |
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 695 |
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Resolve an Instagram video/IGTV URL through downloadgram.net and return the raw video bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 695 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
UpperCAmelCase = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 420 | """simple docstring"""
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 420 | 1 |
def max_number_after_removing_one_digit(num: int) -> int:
    # NOTE: the original function name was obfuscated away; a descriptive name is used here.
    # For each digit position, drop that digit and return the largest resulting number.
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    digits = str(abs(num))
    num_transpositions = [list(digits) for _ in range(len(digits))]
    for index in range(len(digits)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
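# NOTE (illustrative, not part of the original file): for 152 the candidates after removing one
# digit are 52, 12 and 15, so the function returns 52.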
| 705 |
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Iterative Euclidean algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
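# NOTE (illustrative, not part of the original file): both implementations agree, e.g.
#   greatest_common_divisor(24, 40) == gcd_by_iterative(24, 40) == 8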
| 551 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : List[str] ):
"""simple docstring"""
# test for the above condition
self.test()
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = 0
_snake_case = False
while not completed:
if counter == 1:
self.reset()
_snake_case = self.advance()
if not self.does_advance(__lowerCamelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
_snake_case , _snake_case , _snake_case = self.update(__lowerCamelCase )
counter += 1
if counter > 1_0_0_0_0:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : int ):
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int ):
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[Any]=False ):
"""simple docstring"""
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class PhrasalConstraint(Constraint):
    """A constraint requiring that the exact token sequence `token_ids` appears in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    """A trie over several candidate token sequences; a leaf marks a fully matched candidate."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """A constraint fulfilled when any one of the candidate token sequences is generated."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    """Tracks the fulfilment state of a list of constraints during beam search."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never modify self.constraints objects
        # throughout this process, so the copy starts at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
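# NOTE (illustrative, not part of the original file): a DisjunctiveConstraint is fulfilled as soon
# as the generated ids spell out any one of its branches, e.g.
#
#   constraint = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
#   constraint.update(1)  # -> (stepped=True, completed=False, reset=False)
#   constraint.update(4)  # -> (stepped=True, completed=True, reset=False)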
| 103 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {'''vocab_file''': '''vocab.txt'''}
snake_case = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
snake_case = {
'''openbmb/cpm-ant-10b''': 1_0_2_4,
}
def snake_case ( lowerCAmelCase_ ) -> int:
_snake_case = collections.OrderedDict()
with open(lowerCAmelCase_ , '''r''' , encoding='''utf-8''' ) as reader:
_snake_case = reader.readlines()
for index, token in enumerate(lowerCAmelCase_ ):
_snake_case = token.rstrip('''\n''' )
_snake_case = index
return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first wordpiece tokenization over a fixed vocabulary."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map the dedicated space/newline tokens onto their literal characters.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba pre-segmentation followed by wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the dedicated space/newline tokens before writing the file back out.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 103 | 1 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 587 | import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a reversible mapping from utf-8 bytes to unicode strings that avoids whitespace/control
    characters, for use in byte-level BPE.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
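# NOTE (illustrative, not part of the original file):
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}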
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 587 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """A list of processors/warpers that applies each of them to the scores in turn."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
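# NOTE (illustrative, not part of the original file): processors and warpers are chained through
# FlaxLogitsProcessorList and invoked with (input_ids, scores, cur_len), e.g.
#
#   processors = FlaxLogitsProcessorList(
#       [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
#   )
#   scores = processors(input_ids, scores, cur_len=input_ids.shape[-1])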
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
__UpperCamelCase : str = list(lowerCAmelCase )
__UpperCamelCase : Tuple = begin_index
def __call__( self : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ) -> int:
"""simple docstring"""
__UpperCamelCase : str = 1 - jnp.bool_(cur_len - self.begin_index )
__UpperCamelCase : Dict = jnp.where(lowerCAmelCase , scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) , lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ):
"""simple docstring"""
    def __init__(self, suppress_tokens: list):
        """simple docstring"""
        self.suppress_tokens = list(suppress_tokens)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        """simple docstring"""
        scores = scores.at[..., self.suppress_tokens].set(-float("""inf"""))
return scores
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ):
"""simple docstring"""
    def __init__(self, force_token_map):
        """simple docstring"""
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        """simple docstring"""
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("""inf""")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0], lambda: scores, lambda: lax.cond(
                self.force_token_array[cur_len] >= 0, lambda: _force_token(cur_len), lambda: scores, ), )
        return scores
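# Illustrative sketch (not part of the original module): the zero-argument
# lax.cond pattern used above closes over already-traced values and picks a
# branch at run time, e.g.:
#
#   x = jnp.arange(4.0)
#   y = lax.cond(x.sum() > 0, lambda: x * 2, lambda: x)   # -> x * 2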
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ):
"""simple docstring"""
    def __init__(self, generate_config, model_config, decoder_input_length):
        """simple docstring"""
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, """max_initial_timestamp_index"""):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__(self, input_ids, scores, cur_len):
        """simple docstring"""
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("""inf"""))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False, )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp, )
            return jnp.where(
                last_was_timestamp, jnp.where(
                    penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float("""inf""")), scores_k.at[: self.eos_token_id].set(-float("""inf""")), ), scores_k, )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False, )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("""inf""")), scores, )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float("""inf""")), scores_k, )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
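# Illustrative sketch (not part of the original module): the min-length rule
# above masks EOS until `cur_len` reaches `min_length`:
#
#   apply_penalty = 1 - jnp.clip(cur_len - min_length, 0, 1)   # 1 while too short
#   scores = jnp.where(apply_penalty,
#                      scores.at[:, eos_token_id].set(-float("inf")),
#                      scores)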
| 279 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
a__ = get_tests_dir('''fixtures/dummy-config.json''')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : str ) -> Dict:
"""simple docstring"""
__UpperCamelCase : Optional[Any] = 0
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__UpperCamelCase : int = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase : Dict = AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase : Optional[int] = AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
__UpperCamelCase : List[Any] = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__UpperCamelCase : Union[str, Any] = os.path.join(lowerCAmelCase , """fake-roberta""" )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
__UpperCamelCase : Dict = AutoConfig.from_pretrained(lowerCAmelCase )
self.assertEqual(type(lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> str:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , lowerCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(lowerCAmelCase ):
AutoConfig.register("""model""" , lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase ):
AutoConfig.register("""bert""" , lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase : Optional[Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase )
__UpperCamelCase : List[str] = AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
__UpperCamelCase : Tuple = AutoConfig.from_pretrained("""bert-base""" )
def lowerCamelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained(lowerCAmelCase , revision="""aaaaaa""" )
def lowerCamelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
__UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase ):
__UpperCamelCase : str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase ):
__UpperCamelCase : Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCAmelCase )
__UpperCamelCase : List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCAmelCase )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase )
__UpperCamelCase : str = AutoConfig.from_pretrained(lowerCAmelCase , trust_remote_code=lowerCAmelCase )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
        class NewModelConfigLocal(BertConfig):
            """simple docstring"""
            model_type = 'new-model'
        try:
            AutoConfig.register("""new-model""" , NewModelConfigLocal )
# If remote code is not set, the default is to use local
__UpperCamelCase : Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
__UpperCamelCase : Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCAmelCase )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
__UpperCamelCase : List[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCAmelCase )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
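# Illustrative sketch (not part of the test file): the register / round-trip
# flow the tests above exercise, with a hypothetical CustomConfig:
#
#   class CustomConfig(PretrainedConfig):
#       model_type = "custom"
#
#   AutoConfig.register("custom", CustomConfig)
#   CustomConfig().save_pretrained(tmp_dir)                      # writes config.json
#   assert isinstance(AutoConfig.from_pretrained(tmp_dir), CustomConfig)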
| 279 | 1 |
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly matches the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 153 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
'''simple docstring'''
    task: str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
@property
    def column_mapping(self):
return {
self.text_column: "text",
self.label_column: "labels",
}
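# Illustrative sketch (not part of the original module): aligning the template
# with real features copies the dataset's ClassLabel into the label schema:
#
#   features = Features({'text': Value('string'),
#                        'labels': ClassLabel(names=['neg', 'pos'])})
#   task = TextClassification().align_with_features(features)
#   task.label_schema['labels'].names   # -> ['neg', 'pos']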
| 153 | 1 |
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
"""simple docstring"""
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """simple docstring"""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            '''surface_area_conical_frustum() only accepts non-negative values''' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def area_square(side_length: float) -> float:
"""simple docstring"""
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def area_triangle(base: float, height: float) -> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """simple docstring"""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('''Given three sides do not form a triangle''' )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
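# Quick check of Heron's formula above (not part of the original file): a
# 5-12-13 right triangle has area (5 * 12) / 2 = 30, and
# area_triangle_three_sides(5, 12, 13) returns exactly 30.0.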
def area_parallelogram(base: float, height: float) -> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """simple docstring"""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('''area_trapezium() only accepts non-negative values''' )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """simple docstring"""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('''area_rhombus() only accepts non-negative values''' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    """simple docstring"""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            '''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
    elif length < 0:
        raise ValueError(
            '''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print("""\nSurface Areas of various geometric shapes: \n""")
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
    print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
| 512 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
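# Quick check of the helper above (not part of the original file):
# reverse_words("hello world foo") -> "foo world hello"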
| 512 | 1 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    '''simple docstring'''
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft('''A''')
        dft_b = self.__dft('''B''')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = '''A = ''' + ''' + '''.join(
            F"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) )
        b = '''B = ''' + ''' + '''.join(
            F"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) )
        c = '''A*B = ''' + ''' + '''.join(
            F"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) )
        return F"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 98 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__UpperCAmelCase = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class a__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ = FlaxBertModel(lowerCamelCase_ )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase_ , 1e-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase_ , repo_id='''test-model-flax''' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase_ , 1e-3 , msg=F"""{key} not identical""" )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase__ = FlaxBertModel(lowerCamelCase_ )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase_ , 1e-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCamelCase_ , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token )
lowerCAmelCase__ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCAmelCase__ = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase_ , 1e-3 , msg=F"""{key} not identical""" )
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1E-4:
            models_are_equal = False
    return models_are_equal
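# Illustrative sketch (not part of the test file): flatten_dict turns a nested
# parameter tree into a flat {tuple-key: array} dict, which is what makes the
# per-key comparison above possible:
#
#   flatten_dict({"dense": {"kernel": kernel_array}})
#   # -> {("dense", "kernel"): kernel_array}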
@require_flax
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ = FlaxBertModel(lowerCamelCase_ )
lowerCAmelCase__ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ , subfolder=lowerCamelCase_ )
self.assertTrue(check_models_equal(lowerCamelCase_ , lowerCamelCase_ ) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCAmelCase__ = FlaxBertModel(lowerCamelCase_ )
lowerCAmelCase__ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , max_shard_size='''10KB''' )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ , subfolder=lowerCamelCase_ )
self.assertTrue(check_models_equal(lowerCamelCase_ , lowerCamelCase_ ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = '''bert'''
lowerCAmelCase__ = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ , subfolder=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = '''bert'''
lowerCAmelCase__ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = FlaxBertModel.from_pretrained(lowerCamelCase_ , subfolder=lowerCamelCase_ )
        self.assertIsNotNone(lowerCamelCase_ )
| 98 | 1 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    """simple docstring"""
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, '''r''') as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
'''simple docstring'''
def __snake_case ( self : str ):
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_flax_glue.main()
UpperCAmelCase = get_results(a__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __snake_case ( self : List[Any] ):
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_clm_flax.main()
UpperCAmelCase = get_results(a__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_summarization_flax.main()
UpperCAmelCase = get_results(a__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __snake_case ( self : List[Any] ):
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_mlm_flax.main()
UpperCAmelCase = get_results(a__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __snake_case ( self : str ):
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
            run_t5_mlm_flax.main()
UpperCAmelCase = get_results(a__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __snake_case ( self : Tuple ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCAmelCase = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_flax_ner.main()
UpperCAmelCase = get_results(a__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __snake_case ( self : str ):
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(a__ , '''argv''' , a__ ):
run_qa.main()
UpperCAmelCase = get_results(a__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
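    # Illustrative sketch (not part of the test file): each test above drives an
    # example script by patching sys.argv, calling its main(), and reading the
    # metrics json the script writes:
    #
    #   with patch.object(sys, "argv", "run_flax_glue.py --output_dir /tmp/out".split()):
    #       run_flax_glue.main()
    #   result = get_results("/tmp/out")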
| 51 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_lowerCAmelCase = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=['''polics''', '''health'''] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, examples):
__magic_name__ = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(a__ , {'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ )], '''scores''': [ANY(a__ )]} )
# No kwarg
__magic_name__ = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(a__ , {'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ )], '''scores''': [ANY(a__ )]} )
__magic_name__ = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(a__ , {'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ )], '''scores''': [ANY(a__ )]} )
__magic_name__ = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
a__ , {'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ ), ANY(a__ )], '''scores''': [ANY(a__ ), ANY(a__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__magic_name__ = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
a__ , {'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ ), ANY(a__ )], '''scores''': [ANY(a__ ), ANY(a__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__magic_name__ = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(a__ , {'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ )], '''scores''': [ANY(a__ )]} )
# https://github.com/huggingface/transformers/issues/13846
__magic_name__ = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ ), ANY(a__ )], '''scores''': [ANY(a__ ), ANY(a__ )]}
for i in range(1 )
] , )
__magic_name__ = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''labels''': [ANY(a__ ), ANY(a__ )], '''scores''': [ANY(a__ ), ANY(a__ )]}
for i in range(2 )
] , )
with self.assertRaises(a__ ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(a__ ):
classifier(a__ , candidate_labels='''politics''' )
with self.assertRaises(a__ ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(a__ ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=a__ )
with self.assertRaises(a__ ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(a__ ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=a__ , )
self.run_entailment_id(a__ )
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
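    # Illustrative sketch (not part of the test file): the pipeline derives
    # entailment_id from config.label2id by looking for a label whose name
    # starts with "entail" (case-insensitive), falling back to -1:
    #
    #   {'entailment': 0, 'neutral': 1, 'contradiction': 2}  -> entailment_id 0
    #   {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}           -> entailment_id -1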
@require_torch
def snake_case__ ( self : str ):
__magic_name__ = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def snake_case__ ( self : Dict ):
__magic_name__ = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
__magic_name__ = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(a__ ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def snake_case__ ( self : Any ):
__magic_name__ = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
__magic_name__ = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(a__ ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def snake_case__ ( self : Any ):
__magic_name__ = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
__magic_name__ = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(a__ ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__magic_name__ = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=a__ , )
self.assertEqual(
nested_simplify(a__ ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def snake_case__ ( self : int ):
__magic_name__ = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
__magic_name__ = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(a__ ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__magic_name__ = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=a__ , )
self.assertEqual(
nested_simplify(a__ ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 432 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """simple docstring"""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = F'''Input values must either be float or int: {list(locals().values() )}'''
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """simple docstring"""
    if not isinstance(axis, str):
        raise TypeError("""Axis must be a str""" )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            """Input values except axis must either be float or int: """
            F'''{list(input_variables.values() )}'''
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
    return new_x, new_y, new_z
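# Worked example of the conversion above (not part of the original file): an
# input angle of 90 becomes (90 % 360) / 450 * 180 / pi ~= 11.46 before being
# fed to sin/cos as radians -- the 450 divisor is a quirk of this module, not
# a standard degrees-to-radians conversion.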
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, "y", 9_0.0) = }""")
| 720 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
    def dummy_uncond_unet(self):
        '''simple docstring'''
        unet = UNet2DModel.from_pretrained(
            """diffusers/consistency-models-test""" , subfolder="""test_unet""" , )
        return unet
@property
    def dummy_cond_unet(self):
        '''simple docstring'''
        unet = UNet2DModel.from_pretrained(
            """diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , )
        return unet
    def get_dummy_components(self, class_cond=False):
        '''simple docstring'''
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("""mps""" ):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """batch_size""": 1,
            """num_inference_steps""": None,
            """timesteps""": [22, 0],
            """generator""": generator,
            """output_type""": """np""",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def test_consistency_model_pipeline_multistep_class_cond(self):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["""class_labels"""] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs
    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
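# --- Editor's hedged usage sketch (added): minimal one-step sampling with the
# ConsistencyModelPipeline, mirroring what the slow tests above exercise. The
# checkpoint id and scheduler settings are taken from those tests; running this
# needs a CUDA GPU and network access.
#
#   unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#   pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
#   image = pipe(num_inference_steps=1, generator=torch.manual_seed(0), output_type="np").images[0]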
| 201 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)
    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
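    # Editor's note (added): the two check_equivalence_* helpers above port weights
    # across frameworks (PT state dict -> Flax params, and Flax params -> PT) and
    # then reuse check_pt_flax_equivalence to compare the first few outputs within
    # a 4e-2 tolerance.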
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            attention_mask,
        ) = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            attention_mask,
        ) = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
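    # --- Editor's hedged usage sketch (added), mirroring the integration test
    # above: scoring image-text pairs with the clip-italian dual encoder.
    #
    #   model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
    #   processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
    #   inputs = processor(text=["una foto di un gatto"], images=image, padding=True, return_tensors="np")
    #   logits = model(**inputs).logits_per_image  # higher logit = better image-text match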
| 101 |
"""A min-heap of Node objects that supports decrease-key in O(log n)."""


class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    # `idx_of_element` (node -> position in the heap list) and `heap_dict`
    # (node name -> value) are kept up to date on every swap; that bookkeeping
    # is what makes decrease_key possible without a linear search.

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify step
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
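# --- Editor's hedged note (added): Python's built-in heapq offers no decrease-key;
# this MinHeap supports it by tracking each node's position in `idx_of_element`.
# A few more operations, for illustration:
print(my_min_heap.peek())         # O(1): current minimum (B after the decrease)
removed = my_min_heap.remove()    # O(log n): pop the minimum
my_min_heap.insert(Node("Q", 2))  # O(log n): push a new node
print(removed, my_min_heap.is_empty())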
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 585 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
        NOTE: Perplexity can only be calculated for causal language models.
        This includes models such as gpt2, causal variations of bert,
        causal versions of t5, and more (the full list can be found
        in the AutoModelForCausalLM documentation here:
        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet
        is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        78.22
        >>> print(round(results["perplexities"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = datasets.load_dataset("wikitext",
        ...                                     "wikitext-2-raw-v1",
        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        60.35
        >>> print(round(results["perplexities"][0], 2))
        81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 708 |
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name][
            "global_relative_attention_bias"
        ]["embedding"] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model."
    )
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
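# --- Editor's hedged usage note (added): example invocation; the script filename
# and paths are placeholders, not real checkpoints.
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_500000 \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path ./t5-v1_1-small-flax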
| 249 | 0 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
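# --- Editor's hedged usage sketch (added), distilled from the slow tests above
# (requires downloading the pretrained sentencepiece checkpoint):
#
#   tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   assert tok.encode("Hello World!") == [18536, 2260, 101]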
| 390 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
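# Editor's note (added): the collator above flattens (batch, num_choices) examples
# into batch*num_choices flat sequences so `tokenizer.pad` can pad them in a single
# call, then `.view(batch_size, num_choices, -1)` restores the choice dimension
# before the labels are re-attached.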
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files['''train'''] = data_args.train_file
if data_args.validation_file is not None:
data_files['''validation'''] = data_args.validation_file
extension = data_args.train_file.split('''.''' )[-1]
raw_datasets = load_dataset(
extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [F"""ending{i}""" for i in range(4 )]
context_name = '''sent1'''
question_header_name = '''sent2'''
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(examples ):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
]
# Flatten out
first_sentences = list(chain(*first_sentences ) )
second_sentences = list(chain(*second_sentences ) )
# Tokenize
tokenized_examples = tokenizer(
first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
train_dataset = raw_datasets['''train''']
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
train_dataset = train_dataset.select(range(max_train_samples ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
train_dataset = train_dataset.map(
preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
eval_dataset = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
eval_dataset = eval_dataset.select(range(max_eval_samples ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
eval_dataset = eval_dataset.map(
preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(eval_predictions ):
predictions , label_ids = eval_predictions
preds = np.argmax(predictions , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
trainer = Trainer(
model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
)
metrics['''train_samples'''] = min(max_train_samples , len(train_dataset ) )
trainer.log_metrics('''train''' , metrics )
trainer.save_metrics('''train''' , metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
metrics['''eval_samples'''] = min(max_eval_samples , len(eval_dataset ) )
trainer.log_metrics('''eval''' , metrics )
trainer.save_metrics('''eval''' , metrics )
kwargs = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs )
else:
trainer.create_model_card(**kwargs )
def _mp_fn( _UpperCamelCase : List[Any] ): # For xla_spawn (TPUs)
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 390 | 1 |
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # keep TensorFlow's C++ logging quiet
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 628 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
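# Return True when the model was produced by `torch.compile` (only possible on
# PyTorch >= 2.0, where compiled models are wrapped in an `OptimizedModule`).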
def is_compiled_module( __a ):
if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(torch, '''_dynamo''' ):
return False
return isinstance(__a, torch._dynamo.eval_frame.OptimizedModule )
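# Unwrap a model from common parallel wrappers (DistributedDataParallel, DataParallel
# and, when available, DeepSpeedEngine), optionally restoring the original `forward`
# and undoing a Transformer Engine conversion on the way out.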
def extract_model_from_parallel( model, keep_fp32_wrapper = True ):
options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
is_compiled = is_compiled_module(model )
if is_compiled:
compiled_model = model
model = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(model, options ):
model = model.module
if not keep_fp32_wrapper:
forward = getattr(model, '''forward''' )
original_forward = model.__dict__.pop('''_original_forward''', None )
if original_forward is not None:
while hasattr(forward, '''__wrapped__''' ):
forward = forward.__wrapped__
if forward == original_forward:
break
model.forward = forward
if getattr(model, '''_converted_to_transformer_engine''', False ):
convert_model(model, to_transformer_engine=False )
if is_compiled:
compiled_model._orig_mod = model
model = compiled_model
return model
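# Block until every process in the distributed setup has reached this point.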
def _lowerCamelCase ( ):
PartialState().wait_for_everyone()
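# Save an object to disk: through `xm.save` on TPU, otherwise with `torch.save`
# on the main process of each node only.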
def save( obj, f ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(obj, f )
elif PartialState().local_process_index == 0:
torch.save(obj, f )
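# Context manager that temporarily sets environment variables (upper-cased keys)
# from `kwargs` and removes them again on exit.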
@contextmanager
def patch_environment( **kwargs ):
for key, value in kwargs.items():
os.environ[key.upper()] = str(value )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
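# Best-effort human-readable name for an object: its qualified name, its plain
# name, or a `str()` fallback.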
def get_pretty_name( obj ):
if not hasattr(obj, '''__qualname__''' ) and not hasattr(obj, '''__name__''' ):
obj = getattr(obj, '''__class__''', obj )
if hasattr(obj, '''__qualname__''' ):
return obj.__qualname__
if hasattr(obj, '''__name__''' ):
return obj.__name__
return str(obj )
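# Recursively merge `source` into `destination`. For example (illustrative inputs),
# merging {'a': {'b': 1}} into {'a': {'c': 2}} yields {'a': {'b': 1, 'c': 2}}.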
def merge_dicts( source, destination ):
for key, value in source.items():
if isinstance(value, dict ):
node = destination.setdefault(key, {} )
merge_dicts(value, node )
else:
destination[key] = value
return destination
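# Return True if something is already listening on the given localhost port
# (defaults to 29_500, the port torch.distributed uses by default).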
def is_port_in_use( port = None ):
if port is None:
port = 29_500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
| 628 | 1 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
# Load configuration defined in the metadata file
with open(metadata_path ) as metadata_file:
metadata = json.load(metadata_file )
config = LukeConfig(use_entity_aware_attention=True , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
# Load the entity vocab file
entity_vocab = load_entity_vocab(entity_vocab_path )
tokenizer = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
entity_token_a = AddedToken('''<ent>''' , lstrip=False , rstrip=False )
entity_token_b = AddedToken('''<ent2>''' , lstrip=False , rstrip=False )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_b]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(_A )
with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(entity_vocab , f )
tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
# Initialize the embeddings of the special tokens
word_emb = state_dict['''embeddings.word_embeddings.weight''']
ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
enta_emb = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
state_dict['''embeddings.word_embeddings.weight'''] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
prefix = f"encoder.layer.{layer_index}.attention.self."
state_dict[prefix + '''w2e_''' + matrix_name] = state_dict[prefix + matrix_name]
state_dict[prefix + '''e2w_''' + matrix_name] = state_dict[prefix + matrix_name]
state_dict[prefix + '''e2e_''' + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
entity_emb = state_dict['''entity_embeddings.entity_embeddings.weight''']
entity_emb[entity_vocab['''[MASK2]''']] = entity_emb[entity_vocab['''[MASK]''']]
model = LukeModel(config=config ).eval()
missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"Missing keys {', '.join(missing_keys )}. Expected only missing embeddings.position_ids" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" )
# Check outputs
tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='''entity_classification''' )
text = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
span = (39, 42)
encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='''pt''' )
outputs = model(**encoding )
# Verify word hidden states
if model_size == "large":
expected_shape = torch.Size((1, 42, 1024) )
expected_slice = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
expected_shape = torch.Size((1, 42, 768) )
expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
expected_shape = torch.Size((1, 1, 1024) )
expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
expected_shape = torch.Size((1, 1, 768) )
expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path ) )
model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab( entity_vocab_path ):
entity_vocab = {}
with open(entity_vocab_path , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(f ):
title, _ = line.rstrip().split('''\t''' )
entity_vocab[title] = index
return entity_vocab
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
_A = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 431 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
@dataclass
class Tracker :
module : nn.Module
traced : list = field(default_factory=list )
handles : list = field(default_factory=list )
def _forward_hook( self, m, inputs, outputs ):
"""simple docstring"""
has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m, nn.Conv2d ) or isinstance(m, nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(m )
def __call__( self, UpperCamelCase__ ):
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase__ )
[x.remove() for x in self.handles]
return self
@property
def parametrized( self ):
"""simple docstring"""
return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0, self.traced ) )
@dataclass
class ModuleTransfer :
src : nn.Module
dest : nn.Module
verbose : int = 1
src_skip : list = field(default_factory=list )
dest_skip : list = field(default_factory=list )
raise_if_mismatch : bool = True
def __call__( self, x ):
"""simple docstring"""
dest_traced = Tracker(self.dest )(x ).parametrized
src_traced = Tracker(self.src )(x ).parametrized
src_traced = list(filter(lambda m : type(m ) not in self.src_skip, src_traced ) )
dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip, dest_traced ) )
if len(dest_traced ) != len(src_traced ) and self.raise_if_mismatch:
raise Exception(
f"Numbers of operations are different. Source module has {len(src_traced )} operations while"
f" destination module has {len(dest_traced )}." )
for dest_m, src_m in zip(dest_traced, src_traced ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"Transfered from={src_m} to={dest_m}" )
class FakeRegNetVisslWrapper( nn.Module ):
def __init__( self, model ):
"""simple docstring"""
super().__init__()
feature_blocks = []
# - get the stem
feature_blocks.append(('''conv1''', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('''block''' ), f"Unexpected layer name {k}"
block_index = len(feature_blocks ) + 1
feature_blocks.append((f"res{block_index}", v) )
self._feature_blocks = nn.ModuleDict(feature_blocks )
def forward( self, x ):
"""simple docstring"""
return get_trunk_forward_outputs(
x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class NameToFromModelFuncMap( dict ):
def convert_name_to_timm( self, x ):
"""simple docstring"""
x_split = x.split('''-''' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self, x ):
"""simple docstring"""
if x not in self:
x = self.convert_name_to_timm(x )
val = partial(lambda: (timm.create_model(x, pretrained=True ).eval(), None) )
else:
val = super().__getitem__(x )
return val
class NameToOurModelFuncMap( dict ):
def __getitem__( self, x ):
"""simple docstring"""
if "seer" in x and "in1k" not in x:
val = RegNetModel
else:
val = RegNetForImageClassification
return val
def manually_copy_vissl_head( from_state_dict , to_state_dict , keys ):
for from_key, to_key in keys:
to_state_dict[to_key] = from_state_dict[from_key].clone()
print(f"Copied key={from_key} to={to_key}" )
return to_state_dict
def convert_weight_and_push( name , from_model_func , our_model_func , config , save_directory , push_to_hub = True , ):
print(f"Converting {name}..." )
with torch.no_grad():
from_model , from_state_dict = from_model_func()
our_model = our_model_func(config ).eval()
module_transfer = ModuleTransfer(src=from_model , dest=our_model , raise_if_mismatch=_A )
x = torch.randn((1, 3, 224, 224) )
module_transfer(x )
if from_state_dict is not None:
keys = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
keys = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
converted_state_dict = manually_copy_vissl_head(from_state_dict , our_model.state_dict() , keys )
our_model.load_state_dict(converted_state_dict )
our_outputs = our_model(x , output_hidden_states=True )
our_output = (
our_outputs.logits if isinstance(our_model , RegNetForImageClassification ) else our_outputs.last_hidden_state
)
from_output = from_model(x )
from_output = from_output[-1] if type(from_output ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
our_output = our_outputs.hidden_states[-1]
assert torch.allclose(from_output , our_output ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=True , )
size = 224 if '''seer''' not in name else 384
# we can use the convnext one
image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=size )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=True , )
print(f"Pushed {name}" )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
filename = '''imagenet-1k-id2label.json'''
num_labels = 1000
expected_shape = (1, num_labels)
repo_id = '''huggingface/label-files'''
id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
id2label = {int(k ): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
names_to_config = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
names_to_ours_model_map = NameToOurModelFuncMap()
names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(checkpoint_url , model_func ) -> Tuple[nn.Module, Dict]:
files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location='''cpu''' )
model = model_func()
# check if we have a head, if yes add it
model_state_dict = files['''classy_state_dict''']['''base_model''']['''model''']
state_dict = model_state_dict['''trunk''']
model.load_state_dict(state_dict )
return model.eval(), model_state_dict["heads"]
# pretrained
names_to_from_model_map['''regnet-y-320-seer'''] = partial(
load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
names_to_from_model_map['''regnet-y-640-seer'''] = partial(
load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
names_to_from_model_map['''regnet-y-1280-seer'''] = partial(
load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
names_to_from_model_map['''regnet-y-10b-seer'''] = partial(
load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
names_to_from_model_map['''regnet-y-320-seer-in1k'''] = partial(
load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
names_to_from_model_map['''regnet-y-640-seer-in1k'''] = partial(
load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
names_to_from_model_map['''regnet-y-1280-seer-in1k'''] = partial(
load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
names_to_from_model_map['''regnet-y-10b-seer-in1k'''] = partial(
load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 431 | 1 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case : Union[str, Any] = logging.get_logger(__name__)
_snake_case : List[str] = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
_snake_case : int = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
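# Return the set of adjacent symbol pairs in a word, e.g. (illustrative)
# get_pairs(('h', 'e', 'l', 'l', 'o')) == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}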
def get_pairs( word ):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
return pairs
# Speech2Text2 has no max input length
_snake_case : List[Any] = {'facebook/s2t-wav2vec2-large-en-de': 1024}
class Speech2Text2Tokenizer( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs , ):
super().__init__(
unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
self.do_lower_case = do_lower_case
with open(vocab_file , encoding="utf-8" ) as vocab_handle:
self.encoder = json.load(vocab_handle )
self.decoder = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
self.bpe_ranks = None
self.cache = None
else:
with open(merges_file , encoding="utf-8" ) as merges_handle:
merges = merges_handle.read().split("\n" )[:-1]
merges = [tuple(merge.split()[:2] ) for merge in merges]
self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
self.cache = {}
@property
def vocab_size( self ):
return len(self.decoder )
def get_vocab( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def bpe( self , token ):
word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word )
if not pairs:
return token
while True:
bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word ):
try:
j = word.index(first , i )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
i = j
if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
new_word = tuple(new_word )
word = new_word
if len(word ) == 1:
break
else:
pairs = get_pairs(word )
word = " ".join(word )
if word == "\n " + BPE_TOKEN_MERGES:
word = "\n" + BPE_TOKEN_MERGES
if word.endswith(BPE_TOKEN_MERGES ):
word = word.replace(BPE_TOKEN_MERGES , "" )
word = word.replace(" " , BPE_TOKEN_VOCAB )
self.cache[token] = word
return word
def _tokenize( self , text ):
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding." )
if self.do_lower_case:
text = text.lower()
text = text.split()
split_tokens = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
return split_tokens
def _convert_token_to_id( self , token ):
return self.encoder.get(token , self.encoder.get(self.unk_token ) )
def _convert_id_to_token( self , index ):
result = self.decoder.get(index , self.unk_token )
return result
def convert_tokens_to_string( self , tokens ):
string = " ".join(tokens )
# make sure @@ tokens are concatenated
string = "".join(string.split(BPE_TOKEN_VOCAB ) )
return string
def save_vocabulary( self , save_directory , filename_prefix = None ):
if not os.path.isdir(save_directory ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
merges_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(vocab_file , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
index = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(merges_file , "w" , encoding="utf-8" ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
index = token_index
writer.write(" ".join(bpe_tokens ) + "\n" )
index += 1
return (vocab_file, merges_file)
| 524 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case : Optional[Any] = logging.get_logger(__name__)
_snake_case : Optional[Any] = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class VanConfig( PretrainedConfig ):
model_type = '''van'''
def __init__( self , image_size=224 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[64, 128, 320, 512] , depths=[3, 3, 12, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-6 , layer_scale_init_value=1e-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ):
super().__init__(**kwargs )
self.image_size = image_size
self.num_channels = num_channels
self.patch_sizes = patch_sizes
self.strides = strides
self.hidden_sizes = hidden_sizes
self.depths = depths
self.mlp_ratios = mlp_ratios
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.layer_scale_init_value = layer_scale_init_value
self.drop_path_rate = drop_path_rate
self.dropout_rate = dropout_rate
| 524 | 1 |
'''simple docstring'''
def __UpperCamelCase ( lowercase_ : list ):
"""simple docstring"""
if not isinstance(lowercase_ , list ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(lowercase_ ) == 0:
raise ValueError('Input list must be a non empty list' )
if len(lowercase_ ) == 1:
return True
common_diff = series[1] - series[0]
for index in range(len(lowercase_ ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
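# Example (illustrative): [2, 4, 6] is an arithmetic series (common difference 2),
# while [2, 4, 7] is not.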
def __UpperCamelCase ( lowercase_ : list ):
"""simple docstring"""
if not isinstance(lowercase_ , list ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(lowercase_ ) == 0:
raise ValueError('Input list must be a non empty list' )
answer = 0
for val in series:
answer += val
return answer / len(lowercase_ )
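# Example (illustrative): the arithmetic mean of [2, 4, 6] is 4.0.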
if __name__ == "__main__":
import doctest
doctest.testmod()
| 536 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar("T")
class Node (Generic[T] ):
"""simple docstring"""
def __init__( self , data ):
"""simple docstring"""
self.data = data
self.next = None
def __str__( self ):
"""simple docstring"""
return f'{self.data}'
class LinkedStack (Generic[T] ):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
self.top = None
def __iter__( self ):
"""simple docstring"""
node = self.top
while node:
yield node.data
node = node.next
def __str__( self ):
"""simple docstring"""
return "->".join([str(item ) for item in self] )
def __len__( self ):
"""simple docstring"""
return len(tuple(iter(self ) ) )
def is_empty( self ):
"""simple docstring"""
return self.top is None
def push( self , UpperCamelCase__ ):
"""simple docstring"""
node = Node(UpperCamelCase__ )
if not self.is_empty():
node.next = self.top
self.top = node
def pop( self ):
"""simple docstring"""
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , Node )
pop_node = self.top
self.top = self.top.next
return pop_node.data
def peek( self ):
"""simple docstring"""
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def clear( self ):
"""simple docstring"""
self.top = None
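# Minimal usage sketch (illustrative):
# stack = LinkedStack()
# stack.push(1)
# stack.push(2)
# assert stack.pop() == 2 and stack.peek() == 1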
if __name__ == "__main__":
from doctest import testmod
testmod()
| 536 | 1 |
import os
def solution( ):
file_path = os.path.join(os.path.dirname(__file__ ) , "num.txt" )
with open(file_path ) as file_hand:
return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 17 |
from sklearn.metrics import recall_score
import datasets
__SCREAMING_SNAKE_CASE = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
__SCREAMING_SNAKE_CASE = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
__SCREAMING_SNAKE_CASE = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
score = recall_score(
references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
return {"recall": float(score ) if score.size == 1 else score}
| 17 | 1 |
from __future__ import annotations
class Node :
def __init__( self , data ):
self.data = data
self.left: Node | None = None
self.right: Node | None = None
def display( tree : Node | None ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree( tree : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def main( ) -> None: # Main function for testing.
'''simple docstring'''
# Build a small test tree (the exact shape is illustrative).
tree = Node(1 )
tree.left = Node(2 )
tree.right = Node(3 )
tree.left.left = Node(4 )
tree.left.right = Node(5 )
tree.left.right.left = Node(6 )
tree.right.left = Node(7 )
tree.right.left.left = Node(8 )
tree.right.left.right = Node(9 )
print(is_full_binary_tree(tree ) )
print(depth_of_tree(tree ) )
print("Tree is: " )
display(tree )
if __name__ == "__main__":
main()
| 124 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
snake_case : int = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
snake_case : List[str] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
snake_case : Tuple = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
snake_case : str = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
snake_case : Any = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _info( self ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=num_workers ) as executor:
futures = []
completion_id = Counter()
n_samples = 0
results = defaultdict(list )
for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
for candidate in candidates:
test_program = candidate + "\n" + test_case
args = (test_program, timeout, task_id, completion_id[task_id])
future = executor.submit(check_correctness , *args )
futures.append(future )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(futures ):
result = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
total, correct = [], []
for result in results.values():
result.sort()
passed = [r[1]["passed"] for r in result]
total.append(len(passed ) )
correct.append(sum(passed ) )
total = np.array(total )
correct = np.array(correct )
ks = k
pass_at_k = {f'''pass@{k}''': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)
    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
| 124 | 1 |
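# A minimal, self-contained sketch of how the unbiased pass@k estimator above behaves on
# toy counts; the sample numbers here are illustrative, not from any benchmark run.
import itertools
import numpy as np

def estimate_pass_at_k(num_samples, num_correct, k):
    # Unbiased estimator 1 - C(n - c, k) / C(n, k), computed stably as a running product.
    def estimator(n, c, k):
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        num_samples_it = iter(num_samples)
    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])

# Two toy problems with 10 samples each: 3 correct and 0 correct completions respectively.
print(estimate_pass_at_k(10, [3, 0], k=1))  # -> approximately [0.3, 0.0]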
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
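# Quick illustrative check of the explicit Euler stepper on y' = y, y(0) = 1; the exact
# solution at x = 1 is e ≈ 2.718, which Euler underestimates at coarse step sizes.
approx = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(approx[-1])  # ≈ 2.7048 for step_size = 0.01 (i.e. (1.01)**100)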
| 705 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7,
        is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False,
        use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4,
        decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0,
        bos_token_id=1, eos_token_id=2, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache,
            pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
def _a (self ) -> Dict:
'''simple docstring'''
pass
def _a (self ) -> List[str]:
'''simple docstring'''
pass
def _a (self ) -> int:
'''simple docstring'''
pass
    def test_config(self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_decoder_model_past(self):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*SCREAMING_SNAKE_CASE_ )
    def test_retain_grad_hidden_states_attentions(self):  # decoder cannot keep gradients
'''simple docstring'''
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
'''simple docstring'''
pass
| 469 | 0 |
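# The past_key_values test above validates incremental decoding: feeding one new token with
# a cache must match re-running the full sequence. A self-contained toy sketch of the same
# idea (a cumulative-sum "model" standing in for a causal decoder; not TrOCR itself):
import torch

def full_forward(emb):  # emb: (seq, dim) -> "hidden state" of token t is the prefix sum
    return torch.cumsum(emb, dim=0)

def cached_step(new_emb, cache):  # process one token, carrying a running-sum "cache"
    new_cache = cache + new_emb
    return new_cache, new_cache

emb = torch.randn(5, 4)
full = full_forward(emb)
cache = torch.zeros(4)
steps = []
for t in range(5):
    out, cache = cached_step(emb[t], cache)
    steps.append(out)
assert torch.allclose(full, torch.stack(steps), atol=1e-6)  # incremental == full pass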
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 14 |
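# The _LazyModule pattern above defers heavy submodule imports until first access. A minimal
# sketch of the same deferred-import idea using module-level __getattr__ (PEP 562); the
# _LAZY_ATTRS mapping here is purely illustrative (json stands in for a heavy module).
import importlib
from typing import Any

_LAZY_ATTRS = {"heavy_helper": "json"}  # attribute name -> module that provides it

def __getattr__(name: str) -> Any:
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        globals()[name] = module  # cache so the import only runs once
        return module
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")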
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 226 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 714 |
'''simple docstring'''
ROMAN = [
(1_000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 | 0 |
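# Round-trip illustration of the two conversion helpers above (subtractive forms included):
assert int_to_roman(2024) == "MMXXIV"
assert roman_to_int("MMXXIV") == 2024
assert roman_to_int(int_to_roman(1994)) == 1994  # 1994 renders as "MCMXCIV"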
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16,
        num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu",
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 39 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list_a = list(string1)
    list_b = list(string2)
    count = 0
    for i in range(len(list_a)):
        if list_a[i] != list_b[i]:
            count += 1
            list_a[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list_a)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list_a = list(string1)
    list_b = list(string2)
    count_n = 0
    for i in range(len(list_a)):
        if list_a[i] != list_b[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 584 | 0 |
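# Worked illustration of the merge step at the heart of Quine-McCluskey: two minterm strings
# that differ in exactly one bit combine, with the differing position replaced by "_".
print(compare_string("0000", "0100"))  # -> '0_00' (merged: one differing bit)
print(compare_string("0000", "0110"))  # -> False (more than one differing bit, cannot merge)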
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)
    def __len__(self):
        return self.length
    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True
    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True
    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length")
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"])

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
| 702 |
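# Illustrative smoke test of the synthetic regression data defined above (seeded for
# determinism); DataLoader's default collate turns the numpy scalars into tensors.
from torch.utils.data import DataLoader

dataset = RegressionDataset(a=2, b=3, length=8, seed=42)
loader = DataLoader(dataset, batch_size=4)
for batch in loader:
    print(batch["x"].shape, batch["y"].shape)  # torch.Size([4]) torch.Size([4])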
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 546 | 0 |
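# Offline illustration of the markdown formatting step above (no network call; the story
# dicts here are made up to show the expected shape of the API response):
stories = [
    {"title": "Example story", "url": "https://example.com"},
    {"title": "Another story", "url": "https://example.org"},
]
print("\n".join("* [{title}]({url})".format(**story) for story in stories))
# * [Example story](https://example.com)
# * [Another story](https://example.org)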
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 136 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        summary_type="last", use_proj=None, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)
    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)
    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 10 | 0 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device, )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
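# Toy illustration of the gather pattern used above: a per-residue-type lookup table indexed
# by a per-residue type vector yields per-residue index maps in one shot. Sizes here are
# illustrative (real tables are (21 types, 14 or 37 atoms)).
import torch

restype_table = torch.tensor([[0, 1, 2], [3, 4, 5]])  # (num_types, atoms_per_type)
aatype = torch.tensor([1, 0, 1])                      # one type id per residue
per_residue = restype_table[aatype]                   # (num_residues, atoms_per_type)
print(per_residue)  # tensor([[3, 4, 5], [0, 1, 2], [3, 4, 5]])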
| 587 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = "▁"
snake_case = {"vocab_file": "sentencepiece.bpe.model"}
snake_case = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
snake_case = {
"facebook/mbart-large-50-one-to-many-mmt": 1024,
}
# fmt: off
snake_case = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self, vocab_file, src_lang=None, tgt_lang=None, eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case = None ):
if not os.path.isdir(_snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , "wb" ) as fi:
_lowerCAmelCase : int = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case = None , _snake_case = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
_lowerCAmelCase : Any = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Union[str, Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_snake_case )) + suffix_ones
return prefix_ones + ([0] * len(_snake_case )) + ([0] * len(_snake_case )) + suffix_ones
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : Optional[Any] = self(_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , **_snake_case )
_lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(_snake_case )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case = "en_XX" , _snake_case = None , _snake_case = "ro_RO" , **_snake_case , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : List[str] = tgt_lang
return super().prepare_seqaseq_batch(_snake_case , _snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE__ ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case ):
_lowerCAmelCase : List[str] = self.lang_code_to_id[src_lang]
_lowerCAmelCase : List[str] = [self.cur_lang_code_id]
_lowerCAmelCase : Optional[int] = [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , _snake_case ):
_lowerCAmelCase : int = self.lang_code_to_id[tgt_lang]
_lowerCAmelCase : List[str] = [self.cur_lang_code_id]
_lowerCAmelCase : int = [self.eos_token_id]
| 587 | 1 |
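# --- Illustrative sketch (added commentary; not part of the corpus row above) ---
# The tokenizer row above keeps two id spaces aligned: SentencePiece gives
# '<unk>' id 0 and starts real pieces at index 3, while fairseq reserves
# ids 0-3 for '<s>', '<pad>', '</s>', '<unk>'. A fixed offset of 1 plus a
# small override table reconciles them. The vocab below is a made-up
# six-piece example used only to show the arithmetic.
FAIRSEQ_SPECIALS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1
TOY_SPM_VOCAB = ["<unk>", "<s>", "</s>", ",", ".", "▁de"]  # spm id = list index

def toy_token_to_id(token: str) -> int:
    if token in FAIRSEQ_SPECIALS:
        return FAIRSEQ_SPECIALS[token]
    spm_id = TOY_SPM_VOCAB.index(token) if token in TOY_SPM_VOCAB else 0
    # SentencePiece returns 0 for unknown pieces; that must map to fairseq's unk id
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_SPECIALS["<unk>"]

assert toy_token_to_id("<pad>") == 1
assert toy_token_to_id(",") == 4           # spm id 3 + offset 1
assert toy_token_to_id("never-seen") == 3  # unknown pieces fall back to <unk>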
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__A : Optional[Any] = logging.get_logger(__name__)
# General docstring
__A : str = "PoolFormerConfig"
# Base docstring
__A : Optional[Any] = "sail/poolformer_s12"
__A : List[Any] = [1, 5_1_2, 7, 7]
# Image classification docstring
__A : List[str] = "sail/poolformer_s12"
__A : Tuple = "tabby, tabby cat"
__A : Tuple = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def __a ( A__ : Any , A__ : float = 0.0 , A__ : bool = False ):
if drop_prob == 0.0 or not training:
return input
SCREAMING_SNAKE_CASE = 1 - drop_prob
SCREAMING_SNAKE_CASE = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
SCREAMING_SNAKE_CASE = keep_prob + torch.rand(A__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
SCREAMING_SNAKE_CASE = input.div(A__ ) * random_tensor
return output
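# --- Illustrative sketch of the same stochastic-depth idea (added commentary) ---
# drop_path zeroes whole residual branches per sample: draw one Bernoulli
# flag per example, broadcast it over the remaining dims, and rescale the
# survivors by 1/keep_prob so the expected output matches the input. A
# minimal standalone version, assuming a float tensor input:
import torch

def stochastic_depth(x: torch.Tensor, drop_prob: float, training: bool) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # one flag per sample
    mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device).add_(keep_prob).floor_()
    return x / keep_prob * mask  # rescale survivors so E[output] == x

sample = stochastic_depth(torch.ones(4, 3, 8, 8), drop_prob=0.5, training=True)
# each example in `sample` is now either all zeros or all 2.0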
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCamelCase : Optional[float] = None ):
super().__init__()
SCREAMING_SNAKE_CASE = drop_prob
def _snake_case ( self : List[str] , __lowerCamelCase : torch.Tensor ):
return drop_path(UpperCamelCase__ , self.drop_prob , self.training )
def _snake_case ( self : List[str] ):
return "p={}".format(self.drop_prob )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int=None ):
super().__init__()
SCREAMING_SNAKE_CASE = patch_size if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (patch_size, patch_size)
SCREAMING_SNAKE_CASE = stride if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (stride, stride)
SCREAMING_SNAKE_CASE = padding if isinstance(UpperCamelCase__ , collections.abc.Iterable ) else (padding, padding)
SCREAMING_SNAKE_CASE = nn.Convad(UpperCamelCase__ , UpperCamelCase__ , kernel_size=UpperCamelCase__ , stride=UpperCamelCase__ , padding=UpperCamelCase__ )
SCREAMING_SNAKE_CASE = norm_layer(UpperCamelCase__ ) if norm_layer else nn.Identity()
def _snake_case ( self : Tuple , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = self.projection(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = self.norm(UpperCamelCase__ )
return embeddings
class _SCREAMING_SNAKE_CASE ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ):
super().__init__(1 , UpperCamelCase__ , **UpperCamelCase__ )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : Optional[int] ):
super().__init__()
SCREAMING_SNAKE_CASE = nn.AvgPoolad(UpperCamelCase__ , stride=1 , padding=pool_size // 2 , count_include_pad=UpperCamelCase__ )
def _snake_case ( self : List[str] , __lowerCamelCase : List[str] ):
return self.pool(UpperCamelCase__ ) - hidden_states
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ):
super().__init__()
SCREAMING_SNAKE_CASE = nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 )
SCREAMING_SNAKE_CASE = nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 )
SCREAMING_SNAKE_CASE = PoolFormerDropPath(UpperCamelCase__ )
if isinstance(config.hidden_act , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE = config.hidden_act
def _snake_case ( self : Tuple , __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = self.conva(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = self.act_fn(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = self.drop(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = self.conva(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = self.drop(UpperCamelCase__ )
return hidden_states
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Any ):
super().__init__()
SCREAMING_SNAKE_CASE = PoolFormerPooling(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = PoolFormerOutput(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(UpperCamelCase__ )
# Useful for training neural nets
SCREAMING_SNAKE_CASE = PoolFormerDropPath(UpperCamelCase__ ) if drop_path > 0.0 else nn.Identity()
SCREAMING_SNAKE_CASE = config.use_layer_scale
if config.use_layer_scale:
SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ )
SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ )
def _snake_case ( self : Any , __lowerCamelCase : Optional[int] ):
if self.use_layer_scale:
SCREAMING_SNAKE_CASE = self.pooling(self.before_norm(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = ()
SCREAMING_SNAKE_CASE = self.output(self.after_norm(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
else:
SCREAMING_SNAKE_CASE = self.drop_path(self.pooling(self.before_norm(UpperCamelCase__ ) ) )
# First residual connection
SCREAMING_SNAKE_CASE = pooling_output + hidden_states
SCREAMING_SNAKE_CASE = ()
# Second residual connection inside the PoolFormerOutput block
SCREAMING_SNAKE_CASE = self.drop_path(self.output(self.after_norm(UpperCamelCase__ ) ) )
SCREAMING_SNAKE_CASE = hidden_states + layer_output
SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __lowerCamelCase : List[str] ):
super().__init__()
SCREAMING_SNAKE_CASE = config
# stochastic depth decay rule
SCREAMING_SNAKE_CASE = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
SCREAMING_SNAKE_CASE = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
SCREAMING_SNAKE_CASE = nn.ModuleList(UpperCamelCase__ )
# Transformer blocks
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
SCREAMING_SNAKE_CASE = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
UpperCamelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE = nn.ModuleList(UpperCamelCase__ )
def _snake_case ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=False , __lowerCamelCase : Optional[int]=True ):
SCREAMING_SNAKE_CASE = () if output_hidden_states else None
SCREAMING_SNAKE_CASE = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
SCREAMING_SNAKE_CASE = layers
# Get patch embeddings from hidden_states
SCREAMING_SNAKE_CASE = embedding_layer(UpperCamelCase__ )
# Send the embeddings through the blocks
for _, blk in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE = blk(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = layer_outputs[0]
if output_hidden_states:
SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase__ , hidden_states=UpperCamelCase__ )
class _SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
lowerCamelCase__ = PoolFormerConfig
lowerCamelCase__ = """poolformer"""
lowerCamelCase__ = """pixel_values"""
lowerCamelCase__ = True
def _snake_case ( self : List[str] , __lowerCamelCase : str ):
if isinstance(UpperCamelCase__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _snake_case ( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=False ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE = value
__A : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , _UpperCamelCase , )
class _SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : Dict ):
super().__init__(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = config
SCREAMING_SNAKE_CASE = PoolFormerEncoder(UpperCamelCase__ )
# Initialize weights and apply final processing
self.post_init()
def _snake_case ( self : Tuple ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(UpperCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _snake_case ( self : str , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
SCREAMING_SNAKE_CASE = self.encoder(
UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=UpperCamelCase__ , hidden_states=encoder_outputs.hidden_states , )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __lowerCamelCase : Optional[Any] ):
super().__init__()
SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.hidden_size )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = self.dense(UpperCamelCase__ )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , _UpperCamelCase , )
class _SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : str ):
super().__init__(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = config.num_labels
SCREAMING_SNAKE_CASE = PoolFormerModel(UpperCamelCase__ )
# Final norm
SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
SCREAMING_SNAKE_CASE = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _snake_case ( self : Optional[int] , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[torch.LongTensor] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.poolformer(
UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE = outputs[0]
SCREAMING_SNAKE_CASE = self.classifier(self.norm(UpperCamelCase__ ).mean([-2, -1] ) )
SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE = "single_label_classification"
else:
SCREAMING_SNAKE_CASE = "multi_label_classification"
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE = loss_fct(UpperCamelCase__ , UpperCamelCase__ )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE = CrossEntropyLoss()
SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE = loss_fct(UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states ) | 16 | """simple docstring"""
from __future__ import annotations
def lowercase ( UpperCamelCase : list[float] ):
"""simple docstring"""
if len(UpperCamelCase ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
A__ : Union[str, Any] =nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656 | 0 |
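# --- Illustrative sketch of the polygon-inequality check above (added commentary) ---
# A multiset of side lengths can close into a polygon iff the longest side
# is strictly shorter than the sum of the others. Sorting puts the longest
# side last, so one comparison suffices. This sketch additionally requires
# at least three sides, the usual geometric convention:
def can_form_polygon(sides: list[float]) -> bool:
    if len(sides) < 3 or any(s <= 0 for s in sides):
        return False
    ordered = sorted(sides)
    return ordered[-1] < sum(ordered[:-1])

assert can_form_polygon([3, 4, 5])       # a valid triangle
assert not can_form_polygon([1, 2, 10])  # 10 >= 1 + 2, so it cannot close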
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase : List[Any] = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[int] = ["ConditionalDetrFeatureExtractor"]
UpperCAmelCase : List[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 | """simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __a ( _lowercase ):
"""simple docstring"""
lowerCamelCase__ : Any = os.path.join(args.tf_model_dir , '''parameters.json''' )
lowerCamelCase__ : Optional[Any] = json.loads(open(_lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith('''.pt''' ):
lowerCamelCase__ : Any = args.output + '''.pt'''
lowerCamelCase__ : List[str] = OrderedDict()
with tf.device('''/CPU:0''' ):
lowerCamelCase__ : List[str] = tf.train.load_checkpoint(args.tf_model_dir )
lowerCamelCase__ : Tuple = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowerCamelCase__ : Any = reader.get_tensor(_lowercase ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowerCamelCase__ : Tuple = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowerCamelCase__ : Union[str, Any] = 8
lowerCamelCase__ : Optional[Any] = '''model.sqout.%d.weight''' % (player * 2) # fed into an nn.Sequential with Tanh, so 2 at a time
lowerCamelCase__ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : List[str] = torch.tensor(_lowercase )
elif key_name.startswith('''model/moe''' ):
lowerCamelCase__ : Dict = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowerCamelCase__ : Any = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowerCamelCase__ : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith('''/softmlp/kernel''' ):
lowerCamelCase__ : Optional[Any] = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowerCamelCase__ : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Dict = torch.tensor(_lowercase )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowerCamelCase__ : Optional[int] = key_name[-9:-7]
for i in range(16 ):
lowerCamelCase__ : str = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowerCamelCase__ : Union[str, Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowerCamelCase__ : Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith('''model/mlp''' ):
lowerCamelCase__ : Dict = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowerCamelCase__ : Dict = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowerCamelCase__ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Tuple = torch.tensor(_lowercase )
elif key_name.endswith('''/p1/bias''' ):
lowerCamelCase__ : Union[str, Any] = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowerCamelCase__ : Dict = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Union[str, Any] = torch.tensor(_lowercase )
elif key_name.endswith('''/p2/kernel''' ):
lowerCamelCase__ : Tuple = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowerCamelCase__ : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Any = torch.tensor(_lowercase )
elif key_name.endswith('''/p2/bias''' ):
lowerCamelCase__ : int = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowerCamelCase__ : Dict = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : List[Any] = torch.tensor(_lowercase )
elif key_name.startswith('''model/ln''' ):
lowerCamelCase__ : Tuple = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowerCamelCase__ : Union[str, Any] = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowerCamelCase__ : Tuple = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : int = torch.tensor(_lowercase )
elif key_name.endswith('''/g''' ):
lowerCamelCase__ : Tuple = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowerCamelCase__ : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : List[str] = torch.tensor(_lowercase )
elif key_name.startswith('''model/att''' ):
lowerCamelCase__ : str = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowerCamelCase__ : List[Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowerCamelCase__ : Optional[Any] = state[:, 0, :, :]
lowerCamelCase__ : int = state[:, 1, :, :]
lowerCamelCase__ : Optional[int] = state[:, 2, :, :]
lowerCamelCase__ : str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : str = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Tuple = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Optional[Any] = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowerCamelCase__ : Dict = torch.tensor(_lowercase )
lowerCamelCase__ : str = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowerCamelCase__ : Optional[int] = torch.tensor(_lowercase )
lowerCamelCase__ : Union[str, Any] = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowerCamelCase__ : int = torch.tensor(_lowercase )
elif key_name.endswith('''/o/kernel''' ):
lowerCamelCase__ : List[str] = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowerCamelCase__ : Any = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Any = torch.tensor(_lowercase )
elif key_name.startswith('''model/an''' ):
lowerCamelCase__ : str = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowerCamelCase__ : int = '''model.blocks.%d.self_attn.norm.bias''' % player
lowerCamelCase__ : Tuple = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith('''/g''' ):
lowerCamelCase__ : int = '''model.blocks.%d.self_attn.norm.weight''' % player
lowerCamelCase__ : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Any = torch.tensor(_lowercase )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowerCamelCase__ : Optional[int] = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowerCamelCase__ : List[Any] = '''model.%s.weight''' % nlayer
lowerCamelCase__ : List[Any] = vnp.copy() # same in embedded
lowerCamelCase__ : Optional[int] = torch.tensor(_lowercase )
if key_name.startswith('''model/wte''' ):
lowerCamelCase__ : str = '''lm_head.weight'''
lowerCamelCase__ : Dict = vnp.copy() # same in embedded
lowerCamelCase__ : List[str] = torch.tensor(_lowercase )
elif key_name.startswith('''model/wob''' ):
lowerCamelCase__ : List[Any] = '''final_logits_bias'''
lowerCamelCase__ : List[str] = vnp.copy() # same in embedded
lowerCamelCase__ : int = state.reshape((1, -1) )
lowerCamelCase__ : Optional[int] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
lowerCamelCase__ : List[Any] = '''model.last_project.weight'''
lowerCamelCase__ : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase__ : Dict = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
lowerCamelCase__ : Dict = '''model.last_project.bias'''
lowerCamelCase__ : Tuple = vnp.copy() # same because it is one dimensional
lowerCamelCase__ : Dict = torch.tensor(_lowercase )
torch.save(_lowercase , args.output )
if __name__ == "__main__":
UpperCAmelCase : Tuple = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
UpperCAmelCase : Any = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 121 | 0 |
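# --- Illustrative sketch of the weight-layout conversion above (added commentary) ---
# Mesh-TensorFlow stores the fused attention kernel as
# [hidden, 3, heads, head_dim]; PyTorch nn.Linear wants
# [out_features, in_features]. Slicing axis 1 and transposing recovers
# separate q/k/v matrices. Shapes below are invented purely for demonstration:
import numpy as np

hidden, heads, head_dim = 6, 2, 4
fused = np.random.randn(hidden, 3, heads, head_dim).astype(np.float32)

q = fused[:, 0, :, :]                                  # [hidden, heads, head_dim]
q_proj = q.reshape(hidden, heads * head_dim).T.copy()  # -> [out, in] for nn.Linear
assert q_proj.shape == (heads * head_dim, hidden)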
'''simple docstring'''
from torch import nn
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Dict = class_size
snake_case: Optional[Any] = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
snake_case: str = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: str = self.mlp(SCREAMING_SNAKE_CASE__ )
return logits | 329 |
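# --- Illustrative usage of the discriminator head above (added commentary) ---
# The module is a single linear projection from a pooled embedding to class
# logits; the commented-out lines hint that a deeper MLP was once tried.
# Minimal usage with invented sizes:
import torch
from torch import nn

head = nn.Linear(16, 3)         # embed_size=16, class_size=3
hidden = torch.randn(5, 16)     # batch of 5 pooled embeddings
logits = head(hidden)           # -> shape (5, 3)
probs = logits.softmax(dim=-1)  # per-class probabilities
assert probs.shape == (5, 3)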
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def lowerCAmelCase_ ( __A : Any , __A : List[str]=False ):
'''simple docstring'''
snake_case: Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case: str = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowerCAmelCase_ ( __A : Tuple , __A : List[str] , __A : List[str]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case: Union[str, Any] = ''
else:
snake_case: Optional[int] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case: Optional[Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
snake_case: Union[str, Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case: Optional[int] = in_proj_weight[
: config.hidden_size, :
]
snake_case: Dict = in_proj_bias[: config.hidden_size]
snake_case: Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case: Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case: Tuple = in_proj_weight[
-config.hidden_size :, :
]
snake_case: List[str] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( __A : Dict ):
'''simple docstring'''
snake_case: Optional[int] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__A , __A )
def lowerCAmelCase_ ( __A : str , __A : List[Any] , __A : List[Any] ):
'''simple docstring'''
snake_case: str = dct.pop(__A )
snake_case: Tuple = val
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: str = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case: Tuple = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __A : Union[str, Any] , __A : Tuple ):
'''simple docstring'''
snake_case: Optional[Any] = ViTConfig()
snake_case: Tuple = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
snake_case: Optional[int] = True
snake_case: Tuple = int(vit_name[-12:-10] )
snake_case: Tuple = int(vit_name[-9:-6] )
else:
snake_case: Optional[int] = 10_00
snake_case: Optional[Any] = 'huggingface/label-files'
snake_case: Union[str, Any] = 'imagenet-1k-id2label.json'
snake_case: List[Any] = json.load(open(hf_hub_download(__A , __A , repo_type='dataset' ) , 'r' ) )
snake_case: Dict = {int(__A ): v for k, v in idalabel.items()}
snake_case: List[str] = idalabel
snake_case: Optional[int] = {v: k for k, v in idalabel.items()}
snake_case: Any = int(vit_name[-6:-4] )
snake_case: Dict = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
snake_case: Optional[Any] = 1_92
snake_case: Union[str, Any] = 7_68
snake_case: Tuple = 12
snake_case: Union[str, Any] = 3
elif vit_name[9:].startswith('small' ):
snake_case: List[Any] = 3_84
snake_case: Optional[int] = 15_36
snake_case: List[Any] = 12
snake_case: Any = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
snake_case: Optional[Any] = 7_68
snake_case: Optional[Any] = 23_04
snake_case: Optional[int] = 8
snake_case: Union[str, Any] = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
snake_case: List[str] = 10_24
snake_case: Union[str, Any] = 40_96
snake_case: str = 24
snake_case: Union[str, Any] = 16
elif vit_name[4:].startswith('huge' ):
snake_case: Optional[Any] = 12_80
snake_case: Dict = 51_20
snake_case: Dict = 32
snake_case: Union[str, Any] = 16
# load original model from timm
snake_case: List[str] = timm.create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case: int = timm_model.state_dict()
if base_model:
remove_classification_head_(__A )
snake_case: Any = create_rename_keys(__A , __A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_q_k_v(__A , __A , __A )
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case: str = ViTModel(__A ).eval()
else:
snake_case: Union[str, Any] = ViTForImageClassification(__A ).eval()
model.load_state_dict(__A )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case: List[Any] = DeiTImageProcessor(size=config.image_size )
else:
snake_case: Tuple = ViTImageProcessor(size=config.image_size )
snake_case: Optional[int] = image_processor(images=prepare_img() , return_tensors='pt' )
snake_case: Any = encoding['pixel_values']
snake_case: List[str] = model(__A )
if base_model:
snake_case: Tuple = timm_model.forward_features(__A )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__A , outputs.pooler_output , atol=1E-3 )
else:
snake_case: List[str] = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1E-3 )
Path(__A ).mkdir(exist_ok=__A )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__UpperCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path) | 329 | 1 |
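# --- Illustrative sketch of the timm -> HF qkv split above (added commentary) ---
# timm fuses the attention projections into one matrix of shape
# [3*hidden, hidden]; the HF ViT layout wants separate q/k/v weights, each
# [hidden, hidden], taken as consecutive row blocks, exactly as the
# read_in_q_k_v step above does. Toy shapes only:
import torch

hidden = 8
in_proj_weight = torch.randn(3 * hidden, hidden)
q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)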
from __future__ import annotations
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ , lowercase__ : Union[str, Any] = text, pattern
lowercase__ , lowercase__ : Optional[int] = len(SCREAMING_SNAKE_CASE_), len(SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1):
if char == self.pattern[i]:
return i
return -1
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = []
for i in range(self.textLen - self.patLen + 1):
lowercase__ : Optional[int] = self.mismatch_in_text(SCREAMING_SNAKE_CASE_)
if mismatch_index == -1:
positions.append(SCREAMING_SNAKE_CASE_)
else:
lowercase__ : int = self.match_in_pattern(self.text[mismatch_index])
lowercase__ : List[str] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
lowerCamelCase__ : List[str] = """ABAABA"""
lowerCamelCase__ : Tuple = """AB"""
lowerCamelCase__ : Optional[int] = BoyerMooreSearch(text, pattern)
lowerCamelCase__ : Tuple = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
| 495 |
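# --- Illustrative note on the bad-character rule above (added commentary) ---
# When pattern[i] != text[s + i], the rightmost occurrence of text[s + i]
# inside the pattern bounds how far the window could jump without missing a
# match; the class above computes that shift per alignment but, like this
# compact standalone version, still reports every match position:
def find_all(text: str, pattern: str) -> list[int]:
    m = len(pattern)
    hits = []
    for s in range(len(text) - m + 1):
        # compare right-to-left, the order Boyer-Moore uses
        if all(pattern[i] == text[s + i] for i in range(m - 1, -1, -1)):
            hits.append(s)
    return hits

assert find_all("ABAABA", "AB") == [0, 3]
assert find_all("AAAA", "AA") == [0, 1, 2]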
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : torch.FloatTensor
__lowerCAmelCase : torch.FloatTensor
__lowerCAmelCase : Optional[torch.FloatTensor] = None
class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ ):
__lowerCAmelCase : Union[str, Any] = 2
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE_ = 0.0_2 , SCREAMING_SNAKE_CASE_ = 1_00 , SCREAMING_SNAKE_CASE_ = 1.0_0_7 , SCREAMING_SNAKE_CASE_ = 80 , SCREAMING_SNAKE_CASE_ = 0.0_5 , SCREAMING_SNAKE_CASE_ = 50 , ):
'''simple docstring'''
lowercase__ : List[Any] = sigma_max
# setable values
lowercase__ : int = None
lowercase__ : np.IntTensor = None
lowercase__ : torch.FloatTensor = None # sigma(t_i)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
return sample
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
lowercase__ : Dict = num_inference_steps
lowercase__ : List[Any] = np.arange(0 , self.num_inference_steps)[::-1].copy()
lowercase__ : Optional[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE_).to(SCREAMING_SNAKE_CASE_)
lowercase__ : str = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowercase__ : int = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
lowercase__ : Optional[Any] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1)
else:
lowercase__ : Tuple = 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase__ : Dict = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE_).to(sample.device)
lowercase__ : int = sigma + gamma * sigma
lowercase__ : Optional[int] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
'''simple docstring'''
lowercase__ : str = sample_hat + sigma_hat * model_output
lowercase__ : str = (sample_hat - pred_original_sample) / sigma_hat
lowercase__ : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
'''simple docstring'''
lowercase__ : str = sample_prev + sigma_prev * model_output
lowercase__ : Dict = (sample_prev - pred_original_sample) / sigma_prev
lowercase__ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
raise NotImplementedError()
| 495 | 1 |
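# --- Illustrative computation of the sigma schedule above (added commentary) ---
# set_timesteps interpolates geometrically between sigma_max**2 and
# sigma_min**2; the list is indexed by timestep value, so sampling begins at
# the largest entry. Plain-numpy reproduction with toy settings:
import numpy as np

sigma_min, sigma_max, steps = 0.02, 80.0, 5
timesteps = np.arange(steps)[::-1]  # [4, 3, 2, 1, 0]
schedule = [
    sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (steps - 1))
    for i in timesteps
]
# schedule[t] is looked up by timestep t, so the first sampling step
# (t = steps - 1) sees sigma_max**2 and the last (t = 0) sees sigma_min**2
assert abs(schedule[-1] - sigma_max**2) < 1e-6
assert abs(schedule[0] - sigma_min**2) < 1e-9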
def a_ ( _A , _A ) -> int:
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def a_ ( ) -> None:
"""simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 328 |
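# --- Illustrative extension of the counting trick above (added commentary) ---
# The same tuple-count idiom expresses the other basic gates; a full
# truth-table sweep confirms each one:
def and_gate(a: int, b: int) -> int:
    return int((a, b).count(0) == 0)

def nor_gate(a: int, b: int) -> int:
    return int((a, b).count(1) == 0)

for a in (0, 1):
    for b in (0, 1):
        assert and_gate(a, b) == (a & b)
        assert nor_gate(a, b) == 1 - (a | b)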
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = 42
_UpperCAmelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = 42
_UpperCAmelCase = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 328 | 1 |
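# --- Illustrative sketch of the optional-dependency pattern above (added commentary) ---
# Every import is guarded by an availability probe so missing extras degrade
# to dummy objects instead of hard ImportErrors at import time. A
# dependency-free reproduction; `some_optional_extra` and `FancyPipeline`
# are hypothetical names, not real packages:
import importlib.util

def is_available(package: str) -> bool:
    return importlib.util.find_spec(package) is not None

if is_available("some_optional_extra"):
    from some_optional_extra import FancyPipeline  # hypothetical real import
else:
    class FancyPipeline:  # dummy that fails loudly only when instantiated
        def __init__(self, *args, **kwargs):
            raise ImportError("FancyPipeline requires the `some_optional_extra` package")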
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
UpperCamelCase__: Optional[Any] = TypeVar("T")
class SCREAMING_SNAKE_CASE( Generic[T] ):
"""simple docstring"""
lowerCamelCase__ = 42 # Cache store of keys
lowerCamelCase__ = 42 # References of the keys in cache
lowerCamelCase__ = 10 # Maximum capacity of cache
def __init__( self : Optional[int] , __snake_case : int ) -> None:
UpperCAmelCase : Any = deque()
UpperCAmelCase : int = set()
if not n:
UpperCAmelCase : Union[str, Any] = sys.maxsize
elif n < 0:
raise ValueError('''n should be a non-negative integer.''' )
else:
UpperCAmelCase : Union[str, Any] = n
def A ( self : Union[str, Any] , __snake_case : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
UpperCAmelCase : List[Any] = self.dq_store.pop()
self.key_reference.remove(__A )
else:
self.dq_store.remove(__A )
self.dq_store.appendleft(__A )
self.key_reference.add(__A )
def A ( self : Any ) -> None:
for k in self.dq_store:
print(__A )
def __repr__( self : Any ) -> str:
return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__: Dict = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 718 |
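# --- Illustrative alternative to the deque+set cache above (added commentary) ---
# collections.OrderedDict gives the same least-recently-used eviction with
# O(1) reordering, avoiding the O(n) deque.remove on each re-reference:
from collections import OrderedDict

class OrderedLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)     # mark as most recently used
        elif len(self.store) >= self.capacity:
            self.store.popitem(last=False)  # evict the least recently used
        self.store[key] = True

cache = OrderedLRU(2)
for k in ("A", "B", "A", "C"):
    cache.refer(k)
assert list(cache.store) == ["A", "C"]      # "B" was evicted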
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCamelCase__: List[Any] = logging.getLogger(__name__)
def snake_case_ ( ) -> int:
UpperCAmelCase : int = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=_lowerCAmelCase , default='''wikitext''' , help='''Name of the training dataset. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=_lowerCAmelCase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=_lowerCAmelCase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=_lowerCAmelCase , default=1000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=_lowerCAmelCase , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=_lowerCAmelCase , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=_lowerCAmelCase , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
UpperCAmelCase : List[str] = parser.parse_args()
return args
def snake_case_ ( _lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
def fn(_lowerCAmelCase : Tuple ):
return tokenizer(examples['''text'''] )
return fn
def snake_case_ ( _lowerCAmelCase : int ) -> Dict:
UpperCAmelCase : Optional[Any] = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
UpperCAmelCase : Optional[Any] = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
UpperCAmelCase : Optional[Any] = tf.train.Features(feature=_lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = tf.train.Example(features=_lowerCAmelCase )
UpperCAmelCase : Any = example.SerializeToString()
records.append(_lowerCAmelCase )
return records
def snake_case_ ( _lowerCAmelCase : List[str] ) -> List[str]:
UpperCAmelCase : List[str] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
UpperCAmelCase : List[Any] = min(len(_lowerCAmelCase ) , args.limit )
UpperCAmelCase : Dict = dataset.select(range(_lowerCAmelCase ) )
print(f"""Limiting the dataset to {args.limit} entries.""" )
UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
UpperCAmelCase : List[Any] = os.path.join(args.output_dir , args.split )
if not os.path.exists(_lowerCAmelCase ):
os.makedirs(_lowerCAmelCase )
else:
UpperCAmelCase : Tuple = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
UpperCAmelCase : Optional[Any] = tokenize_function(_lowerCAmelCase )
UpperCAmelCase : int = dataset.map(_lowerCAmelCase , batched=_lowerCAmelCase , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_lowerCAmelCase : List[str] ):
# Concatenate all texts.
UpperCAmelCase : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
UpperCAmelCase : Optional[Any] = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
UpperCAmelCase : Any = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
UpperCAmelCase : List[str] = {
k: [t[i : i + args.max_length] for i in range(0 , _lowerCAmelCase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
UpperCAmelCase : str = dataset_tokenized.map(_lowerCAmelCase , batched=_lowerCAmelCase , batch_size=1000 , num_proc=4 )
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : Dict = 0
for shard in range(0 , len(_lowerCAmelCase ) , args.shard_size ):
UpperCAmelCase : Union[str, Any] = grouped_dataset[shard : shard + args.shard_size]
UpperCAmelCase : Optional[int] = len(dataset_snapshot['''input_ids'''] )
UpperCAmelCase : Dict = os.path.join(_lowerCAmelCase , f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
UpperCAmelCase : List[Any] = get_serialized_examples(_lowerCAmelCase )
with tf.io.TFRecordWriter(_lowerCAmelCase ) as out_file:
for i in range(len(_lowerCAmelCase ) ):
UpperCAmelCase : Any = serialized_examples[i]
out_file.write(_lowerCAmelCase )
print('''Wrote file {} containing {} records'''.format(_lowerCAmelCase , _lowerCAmelCase ) )
shard_count += 1
total_records += records_containing
with open(f"""split-{args.split}-records-count.txt""" , '''w''' ) as f:
print(f"""Total {args.split} records: {total_records}""" , file=_lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase__: List[Any] = parse_args()
main(args)
| 528 | 0 |
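# --- Illustrative reproduction of the group_texts chunking above (added commentary) ---
# Concatenate every token list, drop the tail that cannot fill a whole
# block, then slice into fixed-length chunks. Plain-list version:
def group_token_lists(batches: list[list[int]], block_size: int) -> list[list[int]]:
    flat = [tok for batch in batches for tok in batch]
    total = (len(flat) // block_size) * block_size  # drop the remainder
    return [flat[i : i + block_size] for i in range(0, total, block_size)]

chunks = group_token_lists([[1, 2, 3], [4, 5], [6, 7, 8, 9]], block_size=4)
assert chunks == [[1, 2, 3, 4], [5, 6, 7, 8]]  # the trailing 9 is dropped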
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)
        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))
        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))
        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict())
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict(), offload_buffers=True,
        )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 532 |
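Outside the test harness, the same hook API can be exercised in a few lines. This sketch assumes only that `accelerate` and `torch` are installed; `ShiftHook` is an illustrative name, not part of the library.

import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class ShiftHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # Add 1 to the first positional input before the wrapped forward runs.
        return (args[0] + 1,) + args[1:], kwargs

    def post_forward(self, module, output):
        # Subtract 1 from the output after the wrapped forward runs.
        return output - 1

layer = nn.Linear(3, 3)
x = torch.randn(2, 3)
baseline = layer(x + 1) - 1
add_hook_to_module(layer, ShiftHook())
assert torch.allclose(layer(x), baseline, atol=1e-5)
remove_hook_from_module(layer)  # restores the original forward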
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 532 | 1 |
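The `_import_structure`/`_LazyModule` pattern above keeps the package import cheap: a submodule is loaded only when one of its exported names is first accessed. A stripped-down standalone analogue of the idea (not the real `_LazyModule` implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert the mapping: exported name -> submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Import the owning submodule on first access, then forward the lookup.
        module = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        return getattr(module, attr)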
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    compressed_sd = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase =state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase =state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
lowerCamelCase =state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase =state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase =state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase =state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase =state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase =state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase =state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase =state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase =state_dict["cls.predictions.decoder.weight"]
lowerCamelCase =state_dict["cls.predictions.bias"]
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase =state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase =state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 462 |
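The mapping loop above is easier to see on a toy state dict: keep six of twelve teacher layers and renumber them for the student. The key names here are illustrative only, not the real BERT/DistilBERT checkpoint keys.

import torch

teacher_sd = {f"encoder.layer.{i}.weight": torch.randn(4, 4) for i in range(12)}
student_sd = {}
for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
    # Copy the selected teacher layer into the student's (renumbered) slot.
    student_sd[f"encoder.layer.{std_idx}.weight"] = teacher_sd[f"encoder.layer.{teacher_idx}.weight"]
print(f"Student layers: {len(student_sd)}")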
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)")
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 462 | 1 |
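The CLI command above is a thin wrapper over the `pipeline` factory, so the same inference can be run directly in Python. The checkpoint name below is just one example of a sentiment model.

from transformers import pipeline

nlp = pipeline(task="sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
print(nlp("Technical editing is satisfying work."))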
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 655 |
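Typical use of the processor above: one call tokenizes the text and preprocesses the images. This assumes the `openai/clip-vit-base-patch32` checkpoint is reachable (it downloads on first use).

import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # dummy black image
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # expect attention_mask, input_ids, pixel_values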
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_snake_case = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 655 | 1 |
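ByT5's tokenizer needs no vocabulary file, since it works on raw UTF-8 bytes, so the bare constructor is usable. If memory serves, three special ids (pad/eos/unk) come first and each byte maps to its value plus 3; treat that offset as an assumption to verify rather than a guarantee.

from transformers import ByT5Tokenizer

tok = ByT5Tokenizer()
ids = tok("hi").input_ids
print(ids)  # expected under the +3 assumption: [107, 108, 1] (with 1 = eos)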
"""simple docstring"""
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
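A quick use of power_iteration above: the dominant eigenvalue of diag(2, 1) is 2, and the iteration recovers it from a generic starting vector.

import numpy as np

a = np.array([[2.0, 0.0], [0.0, 1.0]])
v0 = np.array([1.0, 1.0])
eigen_value, eigen_vector = power_iteration(a, v0)
assert abs(eigen_value - 2.0) <= 1e-6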
| 237 | """simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1000000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'{solution() = }')
| 237 | 1 |
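Two quick sanity checks for the helpers above, the second being the warm-up case from the Project Euler 50 statement (41 = 2 + 3 + 5 + 7 + 11 + 13 is the longest such sum below 100).

assert prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]
assert solution(100) == 41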
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val) -> int:
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
if is_torch_fx_proxy(lowercase ):
return True
if is_torch_available():
import torch
if isinstance(lowercase ,torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowercase ,tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowercase ,(jnp.ndarray, Tracer) ):
return True
return isinstance(lowercase ,np.ndarray )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
return isinstance(lowercase ,np.ndarray )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return _is_numpy(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
import torch
return isinstance(lowercase ,torch.Tensor )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
return False if not is_torch_available() else _is_torch(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
import torch
return isinstance(lowercase ,torch.device )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
return False if not is_torch_available() else _is_torch_device(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
import torch
if isinstance(lowercase ,lowercase ):
if hasattr(lowercase ,lowercase ):
snake_case : Tuple = getattr(lowercase ,lowercase )
else:
return False
return isinstance(lowercase ,torch.dtype )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]:
return False if not is_torch_available() else _is_torch_dtype(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
import tensorflow as tf
return isinstance(lowercase ,tf.Tensor )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
return False if not is_tf_available() else _is_tensorflow(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowercase ,"""is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(lowercase )
return type(lowercase ) == tf.Tensor
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
import jax.numpy as jnp # noqa: F811
return isinstance(lowercase ,jnp.ndarray )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return False if not is_flax_available() else _is_jax(lowercase )
def to_py_obj(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class __lowercase (lowerCAmelCase_ ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
snake_case : str = fields(self )
# Safety and consistency checks
if not len(A ):
raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
snake_case : int = getattr(self , class_fields[0].name )
snake_case : str = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(A ):
if isinstance(A , A ):
snake_case : str = first_field.items()
snake_case : Dict = True
else:
try:
snake_case : Optional[Any] = iter(A )
snake_case : Dict = True
except TypeError:
snake_case : Any = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(A ):
if (
not isinstance(A , (list, tuple) )
or not len(A ) == 2
or not isinstance(element[0] , A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
snake_case : List[str] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
snake_case : Dict = element[1]
elif first_field is not None:
snake_case : List[str] = first_field
else:
for field in class_fields:
snake_case : Any = getattr(self , field.name )
if v is not None:
snake_case : Union[str, Any] = v
def __delitem__( self , *A , **A ) -> Optional[Any]:
raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def UpperCAmelCase ( self , *A , **A ) -> Union[str, Any]:
raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def UpperCAmelCase ( self , *A , **A ) -> Union[str, Any]:
raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def UpperCAmelCase ( self , *A , **A ) -> Any:
raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self , A ) -> Dict:
if isinstance(A , A ):
snake_case : List[Any] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , A , A ) -> List[Any]:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(A , A )
super().__setattr__(A , A )
def __setitem__( self , A , A ) -> Any:
# Will raise a KeyException if needed
super().__setitem__(A , A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(A , A )
def UpperCAmelCase ( self ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class __lowercase (lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls , A ) -> Optional[int]:
raise ValueError(
f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class __lowercase (lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = "longest"
_snake_case = "max_length"
_snake_case = "do_not_pad"
class __lowercase (lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = "pt"
_snake_case = "tf"
_snake_case = "np"
_snake_case = "jax"
class __lowercase :
"""simple docstring"""
def __init__( self , A ) -> str:
snake_case : Dict = context_managers
snake_case : Any = ExitStack()
def __enter__( self ) -> List[str]:
for context_manager in self.context_managers:
self.stack.enter_context(A )
def __exit__( self , *A , **A ) -> str:
self.stack.__exit__(*A , **A )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]:
snake_case : Any = infer_framework(lowercase )
if framework == "tf":
snake_case : Dict = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
snake_case : Optional[int] = inspect.signature(model_class.forward ) # PyTorch models
else:
snake_case : str = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
snake_case : Optional[int] = model_class.__name__
snake_case : str = infer_framework(lowercase )
if framework == "tf":
snake_case : Optional[int] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
snake_case : Optional[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
snake_case : Tuple = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase = False ) -> int:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=None ) -> Dict:
if is_numpy_array(lowercase ):
return np.transpose(lowercase ,axes=lowercase )
elif is_torch_tensor(lowercase ):
return array.T if axes is None else array.permute(*lowercase )
elif is_tf_tensor(lowercase ):
import tensorflow as tf
return tf.transpose(lowercase ,perm=lowercase )
elif is_jax_tensor(lowercase ):
return jnp.transpose(lowercase ,axes=lowercase )
else:
raise ValueError(f"""Type not supported for transpose: {type(lowercase )}.""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> int:
if is_numpy_array(lowercase ):
return np.reshape(lowercase ,lowercase )
elif is_torch_tensor(lowercase ):
return array.reshape(*lowercase )
elif is_tf_tensor(lowercase ):
import tensorflow as tf
return tf.reshape(lowercase ,lowercase )
elif is_jax_tensor(lowercase ):
return jnp.reshape(lowercase ,lowercase )
else:
raise ValueError(f"""Type not supported for reshape: {type(lowercase )}.""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=None ) -> Tuple:
if is_numpy_array(lowercase ):
return np.squeeze(lowercase ,axis=lowercase )
elif is_torch_tensor(lowercase ):
return array.squeeze() if axis is None else array.squeeze(dim=lowercase )
elif is_tf_tensor(lowercase ):
import tensorflow as tf
return tf.squeeze(lowercase ,axis=lowercase )
elif is_jax_tensor(lowercase ):
return jnp.squeeze(lowercase ,axis=lowercase )
else:
raise ValueError(f"""Type not supported for squeeze: {type(lowercase )}.""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Any:
if is_numpy_array(lowercase ):
return np.expand_dims(lowercase ,lowercase )
elif is_torch_tensor(lowercase ):
return array.unsqueeze(dim=lowercase )
elif is_tf_tensor(lowercase ):
import tensorflow as tf
return tf.expand_dims(lowercase ,axis=lowercase )
elif is_jax_tensor(lowercase ):
return jnp.expand_dims(lowercase ,axis=lowercase )
else:
raise ValueError(f"""Type not supported for expand_dims: {type(lowercase )}.""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
if is_numpy_array(lowercase ):
return np.size(lowercase )
elif is_torch_tensor(lowercase ):
return array.numel()
elif is_tf_tensor(lowercase ):
import tensorflow as tf
return tf.size(lowercase )
elif is_jax_tensor(lowercase ):
return array.size
else:
raise ValueError(f"""Type not supported for expand_dims: {type(lowercase )}.""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> int:
for key, value in auto_map.items():
if isinstance(lowercase ,(tuple, list) ):
snake_case : List[Any] = [f"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
snake_case : Dict = f"""{repo_id}--{value}"""
return auto_map
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
for base_class in inspect.getmro(lowercase ):
snake_case : Union[str, Any] = base_class.__module__
snake_case : List[str] = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"""Could not infer framework from class {model_class}.""" )
| 587 |
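All the dispatch helpers above share one shape: probe which framework owns the tensor, then call that framework's native op. A trimmed sketch of the same pattern for transpose, covering numpy and (optionally installed) torch only:

import numpy as np

def transpose_any(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    try:
        import torch
        if isinstance(array, torch.Tensor):
            return array.T if axes is None else array.permute(*axes)
    except ImportError:
        pass
    raise ValueError(f"Type not supported for transpose: {type(array)}.")

assert transpose_any(np.ones((2, 3))).shape == (3, 2)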
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None
| 315 | 0 |
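The property the test file asserts is simple once isolated: after distributed gathering, predictions and labels must come back as exactly 0..N-1. A standalone version of that predicate:

def is_sequential(values, n):
    return list(values) == list(range(n))

assert is_sequential([0, 1, 2, 3], 4)
assert not is_sequential([1, 0, 2, 3], 4)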
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 171 |
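`require_version` parses pip-style specifiers and raises with the given hint when the installed package violates them; a direct call looks like this (the spec and hint are just examples):

from transformers.utils.versions import require_version

require_version("tqdm>=4.27", "pip install tqdm --upgrade")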
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def __lowerCamelCase ( A__ : torch.Tensor , A__ : int , A__ : int , A__ : int ) -> torch.Tensor:
lowerCamelCase_ : int = t.shape[:no_batch_dims]
lowerCamelCase_ : List[str] = list(_flat_idx_to_idx(A__ , A__ ) )
# _get_minimal_slice_set is inclusive
lowerCamelCase_ : Dict = list(_flat_idx_to_idx(flat_end - 1 , A__ ) )
# Get an ordered list of slices to perform
lowerCamelCase_ : Dict = _get_minimal_slice_set(
A__ , A__ , A__ , )
lowerCamelCase_ : Any = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Runs `layer` piecewise over the flattened batch dimensions of `inputs`,
    `chunk_size` elements at a time, writing the partial outputs into a single
    pre-allocated output tree.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
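# Hedged usage sketch (editor's addition; `my_layer` is a hypothetical callable
# whose keyword arguments are batched tensors):
#
#   inputs = {"x": torch.randn(4, 128, 32)}  # shared batch dims: (4, 128)
#   out = chunk_layer(my_layer, inputs, chunk_size=64, no_batch_dims=2)
#   # `out` keeps the original batch dims (4, 128) in its leading dimensions.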
class ChunkSizeTuner:
    """
    Finds the largest chunk size that runs without an OOM `RuntimeError`, and
    caches the result for argument trees of the same shapes.
    """

    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac: Iterable, args: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac, args):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
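# Hedged usage sketch (editor's addition; `my_layer` and `inputs` are the
# hypothetical objects from the chunk_layer sketch above):
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(
#       lambda x, chunk_size: chunk_layer(my_layer, {"x": x}, chunk_size, no_batch_dims=2),
#       args=(inputs["x"],),
#       min_chunk_size=1,
#   )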
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-Tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
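# Hedged usage sketch (editor's addition; the script name and paths are
# placeholders):
#   python convert_gptsan_tf_checkpoint_to_pytorch.py \
#       --tf_model_dir /path/to/tf_checkpoint_dir --output /path/to/gptsan.pt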
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
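# Hedged usage sketch (editor's addition; `model` is a hypothetical Flax module
# and `unfreeze` would need to be imported from flax.core.frozen_dict):
#   param_spec = set_partitions(unfreeze(model.params))
# Each leaf of `param_spec` is then a PartitionSpec (or None) chosen by the
# first matching rule above.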
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two.

    >>> is_power_of_two(8)
    True
    >>> is_power_of_two(6)
    False
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
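# Why the bit trick works (editor's note): a power of two has exactly one set
# bit (8 == 0b1000), and subtracting 1 flips every bit below it (7 == 0b0111),
# so the AND is zero. Any other positive number shares at least one set bit
# with its predecessor, e.g. 6 & 5 == 0b110 & 0b101 == 0b100 != 0.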
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
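# Hedged usage sketch mirroring the tests above (editor's addition):
#   classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   classifier(image, candidate_labels=["cat", "plane", "remote"])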
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
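# Hedged usage sketch (editor's addition; the script name and paths are
# placeholders):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird.ckpt \
#       --big_bird_config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model \
#       --is_trivia_qa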
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
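# Hedged usage note (editor's addition; the test-file path is an assumption
# about the surrounding repository layout):
#   RUN_SLOW=1 python -m pytest tests/models/regnet/test_modeling_tf_regnet.py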
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
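# Hedged usage sketch (editor's addition; the model id is a placeholder). The
# `from_config` swap is the same pattern exercised in `test_switch` above:
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained("some/diffusion-model")
#   pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)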
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None` and then emptying the device cache.
    The returned list should be reassigned to the original variables.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception):
    """
    Checks whether `exception` relates to CUDA out-of-memory, cuDNN not supported, or CPU out-of-memory.
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    """
    Decorator that retries `function` with a halved batch size each time an
    out-of-memory condition is hit. `function` must take `batch_size` as its
    first argument; the decorator supplies it.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
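# Hedged usage sketch (editor's addition; the training function is hypothetical):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # batch_size halves automatically after each OOM RuntimeError
#
#   train()  # call without the batch_size argument; the decorator supplies it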