"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
if not sentence:
return ""
lowercase__ : List[str] = dict(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
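# The same answer follows from the closed forms sum = n(n+1)/2 and
# sum of squares = n(n+1)(2n+1)/6. A minimal cross-check sketch; the
# `solution_closed_form` name is ours, not part of the original file.
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


assert solution_closed_form(10) == 2640  # matches the doctest above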
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def a_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
lowercase__ : Dict = [0] * no_of_processes
lowercase__ : str = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(_lowerCAmelCase ):
lowercase__ : Optional[int] = burst_time[i]
lowercase__ : Any = 0
lowercase__ : int = 0
lowercase__ : Any = 9_9999_9999
lowercase__ : Optional[Any] = 0
lowercase__ : Tuple = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(_lowerCAmelCase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
lowercase__ : Dict = remaining_time[j]
lowercase__ : List[Any] = j
lowercase__ : Any = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
lowercase__ : Optional[Any] = remaining_time[short]
if minm == 0:
lowercase__ : Dict = 9_9999_9999
if remaining_time[short] == 0:
complete += 1
lowercase__ : Union[str, Any] = False
# Find finish time of current process
lowercase__ : int = increment_time + 1
# Calculate waiting time
lowercase__ : Dict = finish_time - arrival_time[short]
lowercase__ : str = finar - burst_time[short]
if waiting_time[short] < 0:
lowercase__ : Tuple = 0
# Increment time
increment_time += 1
return waiting_time
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any ):
'''simple docstring'''
lowercase__ : Optional[int] = [0] * no_of_processes
for i in range(_lowerCAmelCase ):
lowercase__ : Union[str, Any] = burst_time[i] + waiting_time[i]
return turn_around_time
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : List[str] = 0
lowercase__ : Optional[Any] = 0
for i in range(_lowerCAmelCase ):
lowercase__ : Optional[Any] = total_waiting_time + waiting_time[i]
lowercase__ : Tuple = total_turn_around_time + turn_around_time[i]
print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print('Average turn around time =' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("Enter how many process you want to analyze")
_UpperCamelCase : Tuple = int(input())
_UpperCamelCase : Tuple = [0] * no_of_processes
_UpperCamelCase : Optional[int] = [0] * no_of_processes
_UpperCamelCase : int = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("Enter the arrival time and burst time for process:--" + str(i + 1))
_UpperCamelCase : Optional[int] = map(int, input().split())
_UpperCamelCase : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_UpperCamelCase : Any = burst_time
_UpperCamelCase : Dict = no_of_processes
_UpperCamelCase : Tuple = waiting_time
_UpperCamelCase : str = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
_UpperCamelCase : Dict = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
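# A quick non-interactive check of the scheduling functions above; the three
# example processes are ours (not from the original file) and the expected
# values were worked out by hand for shortest-remaining-time-first.
example_arrival = [0, 1, 2]
example_burst = [4, 2, 3]
example_wt = calculate_waitingtime(example_arrival, example_burst, 3)
example_tat = calculate_turnaroundtime(example_burst, 3, example_wt)
assert (example_wt, example_tat) == ([2, 0, 4], [6, 2, 7])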
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class UpperCAmelCase_ ( __lowercase):
def _UpperCAmelCase ( self ) -> Dict:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self._create_example_records()
lowercase__ : Optional[int] = Dataset.from_list(a )
self.assertListEqual(dset.column_names , ['col_1', 'col_2'] )
for i, r in enumerate(a ):
self.assertDictEqual(a , example_records[i] )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Optional[int] = self._create_example_records()
lowercase__ : Dict = Dataset.from_list(a )
lowercase__ : Optional[int] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def _UpperCAmelCase ( self ) -> Optional[int]: # checks what happens with missing columns
lowercase__ : List[Any] = [{"col_1": 1}, {"col_2": "x"}]
lowercase__ : int = Dataset.from_list(a )
self.assertDictEqual(dset[0] , {'col_1': 1} )
self.assertDictEqual(dset[1] , {'col_1': None} ) # NB: first record is used for columns
def _UpperCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record
lowercase__ : int = [{"col_1": []}, {"col_1": [1, 2]}]
lowercase__ : Any = Dataset.from_list(a )
self.assertEqual(dset.info.features['col_1'] , Sequence(Value('int64' ) ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Optional[Any] = Dataset.from_list([] )
self.assertEqual(len(a ) , 0 )
self.assertListEqual(dset.column_names , [] )
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
"""simple docstring"""
import functools
def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(_SCREAMING_SNAKE_CASE ) != 3 or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
return 0
if min(_SCREAMING_SNAKE_CASE ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(_SCREAMING_SNAKE_CASE ) >= 366:
raise ValueError('All days elements should be less than 366' )
lowercase__ : List[Any] = set(_SCREAMING_SNAKE_CASE )
@functools.cache
def dynamic_programming(_lowerCAmelCase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
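# Sanity check with the well-known sample for this problem (LeetCode 983, not
# part of the original file): with pass costs 2/7/15, the optimum is a 1-day
# pass for day 1, a 7-day pass covering days 4-8, and a 1-day pass for day 20.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11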
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_UpperCamelCase : Any = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : List[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ : Union[str, Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ : Dict = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ : List[str] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[Any] = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' )
lowercase__ : Tuple = text_classifier('This is great !' )
self.assertEqual(nested_simplify(a ) , [{'label': 'LABEL_0', 'score': 0.504}] )
lowercase__ : int = text_classifier('This is great !' , top_k=2 )
self.assertEqual(
nested_simplify(a ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] )
lowercase__ : List[str] = text_classifier(['This is great !', 'This is bad'] , top_k=2 )
self.assertEqual(
nested_simplify(a ) , [
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
] , )
lowercase__ : int = text_classifier('This is great !' , top_k=1 )
self.assertEqual(nested_simplify(a ) , [{'label': 'LABEL_0', 'score': 0.504}] )
# Legacy behavior
lowercase__ : Optional[Any] = text_classifier('This is great !' , return_all_scores=a )
self.assertEqual(nested_simplify(a ) , [{'label': 'LABEL_0', 'score': 0.504}] )
lowercase__ : List[str] = text_classifier('This is great !' , return_all_scores=a )
self.assertEqual(
nested_simplify(a ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] )
lowercase__ : Union[str, Any] = text_classifier(['This is great !', 'Something else'] , return_all_scores=a )
self.assertEqual(
nested_simplify(a ) , [
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
] , )
lowercase__ : List[str] = text_classifier(['This is great !', 'Something else'] , return_all_scores=a )
self.assertEqual(
nested_simplify(a ) , [
{'label': 'LABEL_0', 'score': 0.504},
{'label': 'LABEL_0', 'score': 0.504},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[Any]:
import torch
lowercase__ : Tuple = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , )
lowercase__ : str = text_classifier('This is great !' )
self.assertEqual(nested_simplify(a ) , [{'label': 'LABEL_0', 'score': 0.504}] )
@require_tf
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' )
lowercase__ : Tuple = text_classifier('This is great !' )
self.assertEqual(nested_simplify(a ) , [{'label': 'LABEL_0', 'score': 0.504}] )
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = pipeline('text-classification' )
lowercase__ : str = text_classifier('This is great !' )
self.assertEqual(nested_simplify(a ) , [{'label': 'POSITIVE', 'score': 1.0}] )
lowercase__ : Tuple = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(a ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
lowercase__ : Optional[int] = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(a ) , [{'label': 'POSITIVE', 'score': 0.988}] )
@slow
@require_tf
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = pipeline('text-classification' , framework='tf' )
lowercase__ : str = text_classifier('This is great !' )
self.assertEqual(nested_simplify(a ) , [{'label': 'POSITIVE', 'score': 1.0}] )
lowercase__ : Dict = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(a ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
lowercase__ : str = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(a ) , [{'label': 'POSITIVE', 'score': 0.988}] )
def _UpperCAmelCase ( self , a , a , a ) -> Tuple:
lowercase__ : List[str] = TextClassificationPipeline(model=a , tokenizer=a )
return text_classifier, ["HuggingFace is in", "This is another test"]
def _UpperCAmelCase ( self , a , a ) -> Dict:
lowercase__ : Dict = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowercase__ : int = """HuggingFace is in"""
lowercase__ : Optional[Any] = text_classifier(a )
self.assertEqual(nested_simplify(a ) , [{'label': ANY(a ), 'score': ANY(a )}] )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
lowercase__ : List[Any] = ["""HuggingFace is in """, """Paris is in France"""]
lowercase__ : Dict = text_classifier(a )
self.assertEqual(
nested_simplify(a ) , [{'label': ANY(a ), 'score': ANY(a )}, {'label': ANY(a ), 'score': ANY(a )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
lowercase__ : Any = text_classifier(a , top_k=a )
lowercase__ : Dict = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(a ) , [[{'label': ANY(a ), 'score': ANY(a )}] * N, [{'label': ANY(a ), 'score': ANY(a )}] * N] , )
lowercase__ : Tuple = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
lowercase__ : Dict = text_classifier(a )
self.assertEqual(
nested_simplify(a ) , {'label': ANY(a ), 'score': ANY(a )} , )
self.assertTrue(outputs['label'] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
lowercase__ : Union[str, Any] = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(a ):
text_classifier(a )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowercase__ : Any = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] )
self.assertEqual(
nested_simplify(a ) , [{'label': ANY(a ), 'score': ANY(a )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def a_ ( _lowerCAmelCase : Image ):
'''simple docstring'''
lowercase__ : List[str] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
"""simple docstring"""
import sys
import turtle
def a_ ( _lowerCAmelCase : tuple[float, float] , _lowerCAmelCase : tuple[float, float] ) -> int:
'''simple docstring'''
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def a_ ( _lowerCAmelCase : tuple[float, float] , _lowerCAmelCase : tuple[float, float] , _lowerCAmelCase : tuple[float, float] , _lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(lowerCAmelCase__ , get_mid(lowerCAmelCase__ , lowerCAmelCase__ ) , get_mid(lowerCAmelCase__ , lowerCAmelCase__ ) , depth - 1 )
triangle(lowerCAmelCase__ , get_mid(lowerCAmelCase__ , lowerCAmelCase__ ) , get_mid(lowerCAmelCase__ , lowerCAmelCase__ ) , depth - 1 )
triangle(lowerCAmelCase__ , get_mid(lowerCAmelCase__ , lowerCAmelCase__ ) , get_mid(lowerCAmelCase__ , lowerCAmelCase__ ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
_UpperCamelCase : Optional[int] = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
_UpperCamelCase : Union[str, Any] = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
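# Each call draws one outline and then recurses three times, so a drawing of
# depth d produces 1 + 3 + ... + 3**d outlines. A small sketch of that count;
# the `outlines_drawn` helper is ours, not part of the original script.
def outlines_drawn(depth: int) -> int:
    if depth == 0:
        return 1
    return 1 + 3 * outlines_drawn(depth - 1)


assert outlines_drawn(2) == 13  # 1 + 3 + 9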
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]:
self.assertEqual(len(a ) , len(a ) )
# Order doesn't matter
lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
for cba, cba in zip(a , a ):
if isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(a , a )
elif isinstance(a , a ) and not isinstance(a , a ):
self.assertEqual(a , cba.__class__ )
elif not isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(cba.__class__ , a )
else:
self.assertEqual(a , a )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase : Optional[int] ={
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] =[
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Union[str, Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def a_ ( _lowerCAmelCase : int = 1000 ):
'''simple docstring'''
lowercase__ : int = 3
lowercase__ : Any = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
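# The loop can be replaced by inclusion-exclusion over arithmetic series:
# multiples of 3 plus multiples of 5 minus multiples of 15 (counted twice).
# A minimal sketch; `solution_formula` is our name, not part of the original.
def solution_formula(n: int = 1000) -> int:
    def series(k: int) -> int:
        # Sum of positive multiples of k below n: k * (1 + 2 + ... + m).
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return series(3) + series(5) - series(15)


assert solution_formula(10) == 23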
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
return (data["data"], data["target"])
def a_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__lowerCAmelCase , __lowerCAmelCase )
# Predict target for test data
lowercase__ : List[str] = xgb.predict(__lowerCAmelCase )
lowercase__ : List[Any] = predictions.reshape(len(__lowerCAmelCase ) , 1 )
return predictions
def a_ ( ):
'''simple docstring'''
lowercase__ : str = fetch_california_housing()
lowercase__ , lowercase__ : int = data_handling(__lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = train_test_split(
__lowerCAmelCase , __lowerCAmelCase , test_size=0.2_5 , random_state=1 )
lowercase__ : Dict = xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_UpperCamelCase : Union[str, Any] = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix ) -> Matrix:
    '''simple docstring'''
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate(y_list: list[int] ) -> Callable[[int], int]:
    '''simple docstring'''
    size: int = len(y_list )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_list ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function(variable: int ) -> int:
    '''simple docstring'''
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    '''simple docstring'''
    data_points: list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
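# Worked sanity checks for the helpers above (values derived by hand):
# `solve` performs Gaussian elimination, so for 2x + y = 3 and x + 3y = 5:
#     solve([[2, 1], [1, 3]], [[3], [5]]) == [[0.8], [1.4]]
# `interpolate` fits a degree n-1 polynomial through (1, u_1) ... (n, u_n), e.g.
# the quadratic 6n^2 - 11n + 6 through the first three cubes:
#     interpolate([1, 8, 27])(4) == 58   # the first incorrect term (FIT) for k = 3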
| 713
|
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float , tangential_force: float , area: float , ) -> tuple[str, float]:
    '''simple docstring'''
    if (stress, tangential_force, area).count(0 ) != 1:
        raise ValueError('You cannot supply more or less than 2 values' )
    elif stress < 0:
        raise ValueError('Stress cannot be negative' )
    elif tangential_force < 0:
        raise ValueError('Tangential Force cannot be negative' )
    elif area < 0:
        raise ValueError('Area cannot be negative' )
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
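# Hedged usage sketch: pass 0 for exactly one quantity to solve for it, e.g.
#     shear_stress(stress=25.0, tangential_force=100.0, area=0)  # -> ("area", 4.0)
# since area = tangential_force / stress = 100 / 25.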
| 645
| 0
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCAmelCase_ :
pass
| 714
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__( self , parent , batch_size=13 , image_size=[30, 30] , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , n_targets=8 , num_detection_tokens=10 , ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
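        # e.g. with the defaults above: (30 // 2) * (30 // 2) = 225 patches,
        # so expected_seq_len = 225 + 1 + 10 = 236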
    def prepare_config_and_inputs( self ) -> Any:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size ):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels , size=(self.n_targets,) , device=torch_device )
                target["boxes"] = torch.rand(self.n_targets , 4 , device=torch_device )
                labels.append(target )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ) -> int:
        model = YolosModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
    def create_and_check_for_object_detection( self , config , pixel_values , labels ) -> Union[str, Any]:
        model = YolosForObjectDetection(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values=pixel_values )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
        result = model(pixel_values=pixel_values , labels=labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
    def prepare_config_and_inputs_for_common( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> Dict:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size ):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,) , device=torch_device , dtype=torch.long )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets , 4 , device=torch_device , dtype=torch.float )
                    labels.append(target )
                inputs_dict["labels"] = labels
        return inputs_dict
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        self.model_tester = YolosModelTester(self )
        self.config_tester = ConfigTester(self , config_class=YolosConfig , has_text_modality=False , hidden_size=37 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def _UpperCAmelCase ( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def _UpperCAmelCase ( self ) -> Dict:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def _UpperCAmelCase ( self ) -> List[str]:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def _UpperCAmelCase ( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs )
    @slow
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
    @cached_property
    def default_image_processor( self ) -> Union[str, Any]:
        return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
    @slow
    def _UpperCAmelCase ( self ) -> int:
        model = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values )
        # verify outputs
        expected_shape = torch.Size((1, 100, 92) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=torch_device , )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice_logits , atol=1e-4 ) )
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 ) )
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(torch_device )
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(torch_device )
        self.assertEqual(len(results['scores'] ) , 5 )
        self.assertTrue(torch.allclose(results['scores'] , expected_scores , atol=1e-4 ) )
        self.assertSequenceEqual(results['labels'].tolist() , expected_labels )
        self.assertTrue(torch.allclose(results['boxes'][0, :] , expected_slice_boxes ) )
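# Hedged standalone sketch of the same inference flow (model id and threshold
# taken from the test above; the image path is illustrative):
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, YolosForObjectDetection
#
#     processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
#     model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
#     image = Image.open("cats.png")
#     outputs = model(**processor(images=image, return_tensors="pt"))
#     detections = processor.post_process_object_detection(
#         outputs, threshold=0.3, target_sizes=[image.size[::-1]]
#     )[0]  # dict with "scores", "labels", "boxes"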
| 645
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_UpperCamelCase : Dict = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ) -> Union[str, Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ) -> List[Any]:
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ) -> int:
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ) -> int:
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
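    # The two cache checks above assert that incremental decoding with
    # `past_key_values` is equivalent to a single full forward pass: after
    # feeding the prefix and then the final token through `model.decode`, the
    # last-step logits must match the uncached decode to within 1e-3.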
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data( self ) -> Any:
        input_ids = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
    def _UpperCAmelCase ( self ) -> Optional[int]:
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape , expected_shape )
    def _UpperCAmelCase ( self ) -> Any:
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape , expected_shape )
    def _UpperCAmelCase ( self ) -> str:
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
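    # e.g. shift_tokens_right([[71, 82, 18, 33, 2, 1, 1]], 1, 2) yields
    # [[2, 71, 82, 18, 33, 2, 1]]: everything moves one slot right, the
    # decoder-start token 2 is prepended, and one trailing pad token is dropped.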
@require_flax
class FlaxBlenderbotModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def _UpperCAmelCase ( self ) -> str:
        self.model_tester = FlaxBlenderbotModelTester(self )
    def _UpperCAmelCase ( self ) -> int:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def _UpperCAmelCase ( self ) -> List[str]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def _UpperCAmelCase ( self ) -> List[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
    @unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
    @slow
    def _UpperCAmelCase ( self ) -> int:
        generate_kwargs = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
        tokenizer_kwargs = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=True )
        tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
        src_text = ['Sam']
        model_inputs = tokenizer(src_text , return_tensors='jax' )
        generated_ids = model.generate(**model_inputs , **generate_kwargs ).sequences
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids , **tokenizer_kwargs )
        assert generated_txt[0].strip() == tgt_text
| 715
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig :
    def __init__( self , load_in_8bit=False , load_in_4bit=False , llm_int8_threshold=6.0 , llm_int8_skip_modules=None , llm_int8_enable_fp32_cpu_offload=False , llm_int8_has_fp16_weight=False , bnb_4bit_compute_dtype=None , bnb_4bit_quant_type="fp4" , bnb_4bit_use_double_quant=False , **kwargs , ) -> None:
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str ):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype )
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype ):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
        self.post_init()
    def post_init( self ) -> None:
        if not isinstance(self.llm_int8_threshold , float ):
            raise ValueError('llm_int8_threshold must be a float' )
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list ):
            raise ValueError('llm_int8_skip_modules must be a list of strings' )
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool ):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
        if not isinstance(self.llm_int8_has_fp16_weight , bool ):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype ):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
        if not isinstance(self.bnb_4bit_quant_type , str ):
            raise ValueError('bnb_4bit_quant_type must be a string' )
        if not isinstance(self.bnb_4bit_use_double_quant , bool ):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
            '0.39.0' ):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
    def is_quantizable( self ) -> bool:
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method( self ) -> Union[str, None]:
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
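    # e.g. a config built with load_in_4bit=True and bnb_4bit_quant_type="nf4"
    # reports quantization method "nf4"; with load_in_8bit=True it is "llm_int8".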
@classmethod
    def from_dict( cls , config_dict , return_unused_kwargs , **kwargs ) -> "BitsAndBytesConfig":
        config = cls(**config_dict )
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key ):
                setattr(config , key , value )
                to_remove.append(key )
        for key in to_remove:
            kwargs.pop(key , None )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file( self , json_file_path ) -> None:
        with open(json_file_path , 'w' , encoding='utf-8' ) as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True ) + '\n'
            writer.write(json_string )
    def to_dict( self ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
        return output
    def __repr__( self ) -> str:
        return f"""{self.__class__.__name__} {self.to_json_string()}"""
    def to_json_string( self , use_diff = True ) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"
    def to_diff_dict( self ) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
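# Hedged usage sketch: only fields that differ from the defaults survive the
# diff serialization above.
#     config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype="bfloat16")
#     print(config.to_json_string())  # emits just the two non-default fields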
| 645
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_UpperCamelCase : int = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests( unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline( self , model , tokenizer , processor ) -> List[Any]:
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=['polics', 'health'] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test( self , classifier , examples ) -> Union[str, Any]:
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        # No kwarg
        outputs = classifier('Who are you voting for in 2020?' , ['politics'] )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
        self.assertEqual(
            outputs , {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
        self.assertEqual(
            outputs , {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
        outputs = classifier(
            'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'] , ['positive', 'negative'] )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier('' , candidate_labels='politics' )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels='politics' )
        with self.assertRaises(ValueError ):
            classifier('Who are you voting for in 2020?' , candidate_labels='' )
        with self.assertRaises(TypeError ):
            classifier('Who are you voting for in 2020?' , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
        with self.assertRaises(AttributeError ):
            classifier(
                'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=None , )
        self.run_entailment_id(classifier )
    def run_entailment_id( self , zero_shot_classifier ) -> Tuple:
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
@require_torch
    def _UpperCAmelCase ( self ) -> str:
        zero_shot_classifier = pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
    def _UpperCAmelCase ( self ) -> str:
        zero_shot_classifier = pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            } , )
@require_tf
    def _UpperCAmelCase ( self ) -> Dict:
        zero_shot_classifier = pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            } , )
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
        zero_shot_classifier = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
        outputs = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def _UpperCAmelCase ( self ) -> Tuple:
        zero_shot_classifier = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
        outputs = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
| 716
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def bamb(x ):
    '''simple docstring'''
    return int(x / 2**20 )
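# e.g. bamb(3 * 2**20) == 3: bytes are converted to whole mebibytes (MiB).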
class TorchTracemalloc :
    def __enter__( self ) -> Union[str, Any]:
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__( self , *exc ) -> Any:
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 , model_name: str = "bert-base-cased" , n_train: int = 320 , n_val: int = 160 , ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function(config , args ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
            json.dump(train_total_peak_memory , f )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--peak_memory_upper_bound' , type=float , default=None , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
    parser.add_argument(
        '--n_train' , type=int , default=320 , help='Number of training examples to use.' , )
    parser.add_argument(
        '--n_val' , type=int , default=160 , help='Number of validation examples to use.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=1 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
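# Hedged launch sketch (the script and config file names are illustrative):
#     accelerate launch --config_file deepspeed_config.yaml this_script.py \
#         --model_name_or_path bert-base-cased --num_epochs 1 --output_dir .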
| 645
| 0
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
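# The line above is a quine: %r re-embeds the format string into itself, so the
# program prints its own source exactly.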
| 717
|
"""simple docstring"""
def prefix_function(input_string: str ) -> list[int]:
    '''simple docstring'''
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_str: str ) -> int:
    '''simple docstring'''
    return max(prefix_function(input_str ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
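# e.g. prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4], so
# longest_prefix("aabcdaabc") == 4: the suffix "aabc" equals the prefix "aabc".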
| 645
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
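# e.g. the fairseq key "w2v_model.encoder.pos_conv.0" matches the
# "encoder.pos_conv.0" entry above, is prefixed with "unispeech." (it is not a
# top-level key), and is loaded into "unispeech.encoder.pos_conv_embed.conv".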
def set_recursively(hf_pointer , key , value , full_name , weight_type , is_finetuned ):
    '''simple docstring'''
    for attribute in key.split('.' ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model , hf_model , is_finetuned ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : Optional[int] = full_name.split('conv_layers.' )[-1]
lowercase__ : Optional[int] = name.split('.' )
lowercase__ : List[Any] = int(items[0] )
lowercase__ : Optional[Any] = int(items[1] )
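    # type_id 0 -> convolution weight/bias, type_id 2 -> layer-norm parameters
    # (following fairseq's naming for the feature-extractor conv layers)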
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowercase__ : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowercase__ : Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : str=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[Any]=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : List[str] = UniSpeechConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Optional[int] = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowercase__ : str = Dictionary.load_from_json(_lowerCAmelCase )
            # important: change the bos & pad token ids, since the CTC blank symbol
            # is <pad> and not <s> as in fairseq
lowercase__ : Optional[int] = target_dict.pad_index
lowercase__ : str = target_dict.bos_index
lowercase__ : List[Any] = target_dict.eos_index
lowercase__ : int = len(target_dict.symbols )
lowercase__ : str = os.path.join(_lowerCAmelCase , 'vocab.json' )
if not os.path.isdir(_lowerCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_lowerCAmelCase ) )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
lowercase__ : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase__ : str = 42
lowercase__ : List[str] = 43
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Dict = WavaVecaPhonemeCTCTokenizer(
_lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_lowerCAmelCase , )
        lowercase__ : int = config.feat_extract_norm == '''layer'''
lowercase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
lowercase__ : int = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
lowercase__ : Dict = UniSpeechForCTC(_lowerCAmelCase )
else:
lowercase__ : Dict = UniSpeechForPreTraining(_lowerCAmelCase )
if is_finetuned:
lowercase__ : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
else:
lowercase__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowercase__ : Optional[int] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
hf_unispeech.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : int = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 718
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
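    # the method below returns the kwargs consumed by the image processor in the tests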
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 645
| 0
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : Union[str, Any] = logging.get_logger()
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ : List[str] = 4_2
lowerCamelCase__ : Any = field(default_factory=UpperCamelCase_)
lowerCamelCase__ : Optional[int] = field(default_factory=UpperCamelCase_)
def _UpperCAmelCase ( self , a , a , a ) -> List[Any]:
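        # forward hook: record only leaf modules (convs, batch norms) so the
        # source and destination traces can be compared operation by operation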
lowercase__ : Tuple = len(list(m.modules() ) ) == 1 or isinstance(a , nn.Convad ) or isinstance(a , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a )
def __call__( self , a ) -> List[Any]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a )
[x.remove() for x in self.handles]
return self
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda a : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ : str = 4_2
lowerCamelCase__ : List[Any] = 4_2
lowerCamelCase__ : int = 1
lowerCamelCase__ : List[str] = field(default_factory=UpperCamelCase_)
lowerCamelCase__ : List[str] = field(default_factory=UpperCamelCase_)
lowerCamelCase__ : Optional[Any] = True
def __call__( self , a ) -> Dict:
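        # trace both modules on the same dummy input, drop the skipped layer
        # types, then copy state dicts pairwise in traversal order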
lowercase__ : List[str] = Tracker(self.dest )(a ).parametrized
lowercase__ : Any = Tracker(self.src )(a ).parametrized
lowercase__ : Any = list(filter(lambda a : type(a ) not in self.src_skip , a ) )
lowercase__ : Tuple = list(filter(lambda a : type(a ) not in self.dest_skip , a ) )
if len(a ) != len(a ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(a )} operations while"""
f""" destination module has {len(a )}.""" )
for dest_m, src_m in zip(a , a ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a ) -> List[Any]:
super().__init__()
lowercase__ : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), f"""Unexpected layer name {k}"""
lowercase__ : int = len(a ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
lowercase__ : Any = nn.ModuleDict(a )
def _UpperCAmelCase ( self , a ) -> Any:
return get_trunk_forward_outputs(
a , out_feat_keys=a , feature_blocks=self._feature_blocks , )
class UpperCAmelCase_ ( UpperCamelCase_):
def _UpperCAmelCase ( self , a ) -> str:
lowercase__ : List[str] = x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self , a ) -> Callable[[], Tuple[nn.Module, Dict]]:
# default to timm!
if x not in self:
lowercase__ : List[Any] = self.convert_name_to_timm(a )
lowercase__ : Optional[Any] = partial(lambda: (timm.create_model(a , pretrained=a ).eval(), None) )
else:
lowercase__ : int = super().__getitem__(a )
return val
class UpperCAmelCase_ ( UpperCamelCase_):
def __getitem__( self , a ) -> Callable[[], nn.Module]:
if "seer" in x and "in1k" not in x:
lowercase__ : int = RegNetModel
else:
lowercase__ : Optional[Any] = RegNetForImageClassification
return val
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
lowercase__ : Dict = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Callable[[], nn.Module] , _lowerCAmelCase : Callable[[], nn.Module] , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : Path , _lowerCAmelCase : bool = True , ):
'''simple docstring'''
print(f"""Converting {name}...""" )
with torch.no_grad():
lowercase__ : Union[str, Any] = from_model_func()
lowercase__ : Any = our_model_func(snake_case_ ).eval()
lowercase__ : Optional[Any] = ModuleTransfer(src=snake_case_ , dest=snake_case_ , raise_if_mismatch=snake_case_ )
lowercase__ : List[str] = torch.randn((1, 3, 224, 224) )
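        # a dummy batch drives the forward hooks that align source and destination modules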
module_transfer(snake_case_ )
if from_state_dict is not None:
lowercase__ : int = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
lowercase__ : List[str] = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
lowercase__ : Union[str, Any] = manually_copy_vissl_head(snake_case_ , our_model.state_dict() , snake_case_ )
our_model.load_state_dict(snake_case_ )
lowercase__ : Tuple = our_model(snake_case_ , output_hidden_states=snake_case_ )
lowercase__ : List[str] = (
our_outputs.logits if isinstance(snake_case_ , snake_case_ ) else our_outputs.last_hidden_state
)
lowercase__ : str = from_model(snake_case_ )
lowercase__ : List[str] = from_output[-1] if type(snake_case_ ) is list else from_output
        # now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
lowercase__ : Tuple = our_outputs.hidden_states[-1]
assert torch.allclose(snake_case_ , snake_case_ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=snake_case_ , )
lowercase__ : List[str] = 224 if '''seer''' not in name else 384
# we can use the convnext one
lowercase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=snake_case_ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=snake_case_ , )
print(f"""Pushed {name}""" )
def a_ ( _lowerCAmelCase : Path , _lowerCAmelCase : str = None , _lowerCAmelCase : bool = True ):
'''simple docstring'''
lowercase__ : List[str] = '''imagenet-1k-id2label.json'''
lowercase__ : int = 1000
lowercase__ : List[Any] = (1, num_labels)
lowercase__ : Optional[int] = '''huggingface/label-files'''
lowercase__ : Optional[Any] = num_labels
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(snake_case_ , snake_case_ , repo_type='dataset' ) ) , 'r' ) )
lowercase__ : Optional[Any] = {int(snake_case_ ): v for k, v in idalabel.items()}
lowercase__ : int = idalabel
lowercase__ : int = {v: k for k, v in idalabel.items()}
lowercase__ : List[Any] = partial(snake_case_ , num_labels=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ )
lowercase__ : Tuple = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='x' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='x' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='x' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='x' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='x' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='x' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='x' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
lowercase__ : List[str] = NameToOurModelFuncMap()
lowercase__ : Tuple = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_lowerCAmelCase : str , _lowerCAmelCase : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
lowercase__ : List[str] = torch.hub.load_state_dict_from_url(snake_case_ , model_dir=str(snake_case_ ) , map_location='cpu' )
lowercase__ : Optional[int] = model_func()
# check if we have a head, if yes add it
lowercase__ : Optional[int] = files['''classy_state_dict''']['''base_model''']['''model''']
lowercase__ : Dict = model_state_dict['''trunk''']
model.load_state_dict(snake_case_ )
return model.eval(), model_state_dict["heads"]
# pretrained
lowercase__ : Optional[Any] = partial(
snake_case_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowercase__ : Dict = partial(
snake_case_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowercase__ : Tuple = partial(
snake_case_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
lowercase__ : Any = partial(
snake_case_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.5_2 ) ) ) , )
# IN1K finetuned
lowercase__ : Tuple = partial(
snake_case_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowercase__ : Optional[Any] = partial(
snake_case_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowercase__ : Any = partial(
snake_case_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
lowercase__ : int = partial(
snake_case_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.5_2 ) ) ) , )
if model_name:
convert_weight_and_push(
snake_case_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , snake_case_ , snake_case_ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
snake_case_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , snake_case_ , snake_case_ , snake_case_ , )
return config, expected_shape
if __name__ == "__main__":
_UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_UpperCamelCase : Optional[int] = parser.parse_args()
_UpperCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 719
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 0
|
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_UpperCamelCase : Optional[Any] = models.Sequential()
# Step 1 - Convolution
    # Here (64, 64) is the height and width of the dataset images and 3 is the number of RGB channels
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_28, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_UpperCamelCase : Optional[int] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_UpperCamelCase : Union[str, Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55)
_UpperCamelCase : List[Any] = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
_UpperCamelCase : str = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
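    # note: `fit_generator` is deprecated in recent TensorFlow/Keras releases;
    # `classifier.fit(...)` accepts generators directly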
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
_UpperCamelCase : Dict = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
_UpperCamelCase : Tuple = tf.keras.preprocessing.image.img_to_array(test_image)
_UpperCamelCase : Any = np.expand_dims(test_image, axis=0)
_UpperCamelCase : Any = classifier.predict(test_image)
# training_set.class_indices
    # the sigmoid output is a probability in (0, 1), so threshold at 0.5 rather
    # than comparing for exact equality with 0 or 1
    if result[0][0] < 0.5:
        _UpperCamelCase : Tuple = """Normal"""
    else:
        _UpperCamelCase : int = """Abnormality detected"""
| 720
|
"""simple docstring"""
from collections.abc import Sequence
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
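    # Horner's rule: p(x) = (((c_n * x + c_{n-1}) * x + ...) * x + c_0,
    # one multiply and one add per coefficient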
lowercase__ : int = 0.0
for coeff in reversed(_lowerCAmelCase ):
lowercase__ : List[Any] = result * x + coeff
return result
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 645
| 0
|
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : Tuple = logging.get_logger(__name__)
def a_ ( _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : List[str] = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('Quantized models are not supported.' )
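    # the regex captures the depth multiplier (e.g. "1.0") and the input resolution (e.g. "224")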
lowercase__ : Union[str, Any] = re.match(R'^mobilenet_v1_([^_]*)_([^_]*)$' , lowerCamelCase__ )
if matches:
lowercase__ : Union[str, Any] = float(matches[1] )
lowercase__ : Tuple = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
lowercase__ : Tuple = 1001
lowercase__ : Union[str, Any] = 'imagenet-1k-id2label.json'
lowercase__ : List[str] = 'huggingface/label-files'
lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type='dataset' ) , 'r' ) )
lowercase__ : Optional[Any] = {int(lowerCamelCase__ ) + 1: v for k, v in idalabel.items()}
lowercase__ : int = 'background'
lowercase__ : str = idalabel
lowercase__ : Any = {v: k for k, v in idalabel.items()}
return config
def a_ ( ):
'''simple docstring'''
lowercase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict=False ):
'''simple docstring'''
lowercase__ : int = get_mobilenet_va_config(lowerCamelCase__ )
# Load 🤗 model
lowercase__ : Union[str, Any] = MobileNetVaForImageClassification(lowerCamelCase__ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
lowercase__ : Union[str, Any] = MobileNetVaImageProcessor(
crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , )
lowercase__ : Any = image_processor(images=prepare_img() , return_tensors='pt' )
lowercase__ : int = model(**lowerCamelCase__ )
lowercase__ : List[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
lowercase__ : Any = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
lowercase__ : Any = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
lowercase__ : Any = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
lowercase__ : Union[str, Any] = 'google/' + model_name
image_processor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
_UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_UpperCamelCase : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 721
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
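# dataset/config pairs whose preprocessed files are mirrored on the HF GCP bucket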
_UpperCamelCase : Any = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def a_ ( _lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a )
lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a )
self.assertTrue(os.path.exists(a ) )
@pytest.mark.integration
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[int] = None
builder_instance.download_and_prepare()
lowercase__ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['train'] , _lowerCAmelCase )
assert next(iter(ds['train'] ) )
| 645
| 0
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Tuple = [True] * n
lowercase__ : Optional[Any] = False
lowercase__ : Dict = False
lowercase__ : Dict = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
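        # mark every multiple of i as composite; this also runs for composite i,
        # which is redundant work but still correct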
lowercase__ : int = i * 2
while index < n:
lowercase__ : Any = False
lowercase__ : List[str] = index + i
lowercase__ : Optional[Any] = [2]
for i in range(3 , _lowerCAmelCase , 2 ):
if is_prime[i]:
primes.append(_lowerCAmelCase )
return primes
def a_ ( _lowerCAmelCase : int = 9999_6666_3333 ):
'''simple docstring'''
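    # Project Euler 234 ("semidivisible numbers"): sum the numbers n <= limit for
    # which exactly one of lps(n) and ups(n) divides n, where lps/ups are the
    # largest/smallest primes with lps(n)**2 <= n <= ups(n)**2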
lowercase__ : Any = math.floor(math.sqrt(_lowerCAmelCase ) ) + 100
lowercase__ : Dict = prime_sieve(_lowerCAmelCase )
lowercase__ : str = 0
lowercase__ : Tuple = 0
lowercase__ : Any = primes[prime_index]
while (last_prime**2) <= limit:
lowercase__ : Optional[Any] = primes[prime_index + 1]
lowercase__ : str = last_prime**2
lowercase__ : Dict = next_prime**2
# Get numbers divisible by lps(current)
lowercase__ : List[str] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowercase__ : List[Any] = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowercase__ : Union[str, Any] = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowercase__ : str = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 700
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( _lowerCAmelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
'''simple docstring'''
lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 )
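    # verbosity=0 silences per-round logging; a fixed random_state keeps runs reproducible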
xgb.fit(_lowerCAmelCase , _lowerCAmelCase )
# Predict target for test data
lowercase__ : str = xgb.predict(_lowerCAmelCase )
lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 )
return predictions
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = fetch_california_housing()
lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split(
_lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 )
lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 645
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_a)
class UpperCAmelCase_ ( _a):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCamelCase__ : str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True})
lowerCamelCase__ : ClassVar[Features] = Features({"text": Value("string")})
lowerCamelCase__ : ClassVar[Features] = Features({"summary": Value("string")})
lowerCamelCase__ : str = "text"
lowerCamelCase__ : str = "summary"
@property
def _UpperCAmelCase ( self ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 701
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
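        # each masked patch is reconstructed as raw pixels:
        # 3 channels * tubelet_size frames * patch_size**2 pixels per patch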
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]:
lowercase__ : Union[str, Any] = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase__ : Union[str, Any] = bool_masked_pos.to(a )
if return_labels:
if model_class in [
*get_values(a ),
]:
lowercase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def a_ ( ):
'''simple docstring'''
lowercase__ : int = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
lowercase__ : str = np.load(_lowerCAmelCase )
return list(_lowerCAmelCase )
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
lowercase__ : List[Any] = torch.tensor([0.6_469] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
| 645
| 0
|
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
_UpperCamelCase : Any = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
_UpperCamelCase : str = typing.Union[np.floataa, int, float] # noqa: UP007
def a_ ( _lowerCAmelCase : Vector , _lowerCAmelCase : Vector ):
'''simple docstring'''
return np.sqrt(np.sum((np.asarray(_lowerCAmelCase ) - np.asarray(_lowerCAmelCase )) ** 2 ) )
def a_ ( _lowerCAmelCase : Vector , _lowerCAmelCase : Vector ):
'''simple docstring'''
return sum((va - va) ** 2 for va, va in zip(_lowerCAmelCase , _lowerCAmelCase ) ) ** (1 / 2)
if __name__ == "__main__":
def a_ ( ):
'''simple docstring'''
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) )
benchmark()
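# Usage note (editorial addition): in this row's renaming both loop variables
# of the pure-Python version collapse to `va`, so `(va - va) ** 2` is always 0;
# in the un-renamed source the call below returns sqrt(27):
# >>> euclidean_distance_no_np([1, 2, 3], [4, 5, 6])
# 5.196152422706632
# The timeit strings above likewise reference the pre-rename identifiers.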
| 702
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
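# Example invocation sketch (editorial addition; the script filename and all
# paths are assumptions for illustration only):
# python convert_unispeech_sat_checkpoint.py \
#     --checkpoint_path /path/to/unispeech_sat.pt \
#     --dict_path /path/to/dict.ltr.txt \
#     --pytorch_dump_folder_path ./unispeech-sat-hf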
| 645
| 0
|
"""simple docstring"""
def a_ ( _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
lowercase__ : Any = len(_lowerCAmelCase )
lowercase__ : List[str] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
lowercase__ : str = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
lowercase__ : Dict = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
lowercase__ : Optional[Any] = subset[i - 1][j]
if arr[i - 1] <= j:
lowercase__ : str = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
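# Usage note (editorial addition): the assignments above are bound to
# `lowercase__`, so `arr_len` and `subset` resolve only in the un-renamed
# source. There the DP answers subset-sum membership: for
# arr=[3, 34, 4, 12, 5, 2] it returns True for required_sum=9 (4 + 5)
# and False for required_sum=30 (the largest sum avoiding 34 is 26).
# Each cell subset[i][j] is True iff some subset of the first i elements sums to j.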
| 703
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a ):
lowercase__ : Union[str, Any] = 0
return t
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
| 645
| 0
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
lowercase__ : Any = gray_code_sequence_string(_lowerCAmelCase )
# convert them to integers
for i in range(len(_lowerCAmelCase ) ):
lowercase__ : List[Any] = int(sequence[i] , 2 )
return sequence
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
lowercase__ : List[str] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
lowercase__ : List[str] = gray_code_sequence_string(bit_count - 1 )
lowercase__ : Optional[int] = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
lowercase__ : int = '0' + smaller_sequence[i]
sequence.append(_lowerCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
lowercase__ : List[str] = '1' + smaller_sequence[i]
sequence.append(_lowerCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
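# Usage note (editorial addition): the second definition of `a_` (the string
# generator) shadows the first, and the recursive call still targets the
# pre-rename name `gray_code_sequence_string`, so only the base cases run here:
# >>> a_(1)
# ['0', '1']
# With the original names restored, the outer function (name assumed to be
# gray_code_sequence) maps 2 bits to [0, 1, 3, 2].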
| 704
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
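# Worked example (editorial addition; in the un-renamed source the two locals
# are the sum of squares and the square of the sum): for n = 10 the sum of
# squares is 385 and the square of the sum is 55**2 = 3025, so the intended
# result is 3025 - 385 = 2640; the default n = 100 gives 25164150.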
| 645
| 0
|
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
_UpperCamelCase : int = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_UpperCamelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_UpperCamelCase : Optional[int] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def _UpperCAmelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
def _UpperCAmelCase ( self , a , a , a=None , a=1 , a="binary" , a=None ) -> Dict:
lowercase__ : Any = fa_score(
a , a , labels=a , pos_label=a , average=a , sample_weight=a )
return {"f1": float(a ) if score.size == 1 else score}
| 705
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 645
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def a_ ( ):
'''simple docstring'''
lowercase__ : Union[str, Any] = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
lowercase__ : str = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert('RGB' )
return image
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Any = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dct.pop(_lowerCAmelCase )
lowercase__ : Optional[int] = val
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase__ : Dict = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
lowercase__ : Optional[int] = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
lowercase__ : List[Any] = torch.cat((q_bias, torch.zeros_like(_lowerCAmelCase , requires_grad=_lowerCAmelCase ), v_bias) )
lowercase__ : Optional[int] = qkv_bias
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = 364 if 'coco' in model_name else 224
lowercase__ : str = BlipaVisionConfig(image_size=_lowerCAmelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
lowercase__ : Union[str, Any] = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_lowerCAmelCase ).to_dict()
elif "opt-6.7b" in model_name:
lowercase__ : Dict = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_lowerCAmelCase ).to_dict()
elif "t5-xl" in model_name:
lowercase__ : Any = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase__ : List[Any] = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
lowercase__ : List[str] = BlipaConfig(vision_config=_lowerCAmelCase , text_config=_lowerCAmelCase )
return config, image_size
@torch.no_grad()
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[Any]=False ):
'''simple docstring'''
lowercase__ : int = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
lowercase__ : Any = tokenizer('\n' , add_special_tokens=_lowerCAmelCase ).input_ids[0]
lowercase__ : Tuple = get_blipa_config(_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
lowercase__ : Optional[int] = BlipaForConditionalGeneration(_lowerCAmelCase ).eval()
lowercase__ : List[Any] = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
lowercase__ : int = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
lowercase__ : List[str] = 'cuda' if torch.cuda.is_available() else 'cpu'
lowercase__ : Dict = load_model_and_preprocess(
name=_lowerCAmelCase , model_type=_lowerCAmelCase , is_eval=_lowerCAmelCase , device=_lowerCAmelCase )
original_model.eval()
print('Done!' )
# update state dict keys
lowercase__ : Optional[int] = original_model.state_dict()
lowercase__ : int = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase__ : List[str] = state_dict.pop(_lowerCAmelCase )
if key.startswith('Qformer.bert' ):
lowercase__ : Any = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
lowercase__ : str = key.replace('self' , 'attention' )
if "opt_proj" in key:
lowercase__ : List[str] = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
lowercase__ : str = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
lowercase__ : Dict = key.replace('opt' , 'language' )
if key.startswith('t5' ):
lowercase__ : Any = key.replace('t5' , 'language' )
lowercase__ : Optional[int] = val
# read in qv biases
read_in_q_v_bias(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : List[Any] = hf_model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert len(_lowerCAmelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowercase__ : str = load_demo_image()
lowercase__ : List[Any] = vis_processors['eval'](_lowerCAmelCase ).unsqueeze(0 ).to(_lowerCAmelCase )
lowercase__ : List[str] = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_lowerCAmelCase )
# create processor
lowercase__ : Any = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_lowerCAmelCase , image_std=_lowerCAmelCase )
lowercase__ : Any = BlipaProcessor(image_processor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
lowercase__ : Any = processor(images=_lowerCAmelCase , return_tensors='pt' ).pixel_values.to(_lowerCAmelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase )
original_model.to(_lowerCAmelCase )
hf_model.to(_lowerCAmelCase )
with torch.no_grad():
if "opt" in model_name:
lowercase__ : List[str] = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
lowercase__ : str = hf_model(_lowerCAmelCase , _lowerCAmelCase ).logits
else:
lowercase__ : Dict = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
lowercase__ : Any = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
lowercase__ : Optional[int] = hf_model(_lowerCAmelCase , _lowerCAmelCase , labels=_lowerCAmelCase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowercase__ : List[str] = torch.tensor(
[[-41.5850, -4.4_4_4_0, -8.9_9_2_2], [-47.4322, -5.9_1_4_3, -1.7_3_4_0]] , device=_lowerCAmelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowercase__ : Optional[int] = torch.tensor(
[[-57.0109, -9.8_9_6_7, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCAmelCase )
else:
# cast to same type
lowercase__ : Optional[Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCAmelCase ) , _lowerCAmelCase , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
lowercase__ : int = ''
lowercase__ : Dict = tokenizer(_lowerCAmelCase , return_tensors='pt' ).input_ids.to(_lowerCAmelCase )
lowercase__ : int = original_model.generate({'image': original_pixel_values} )
lowercase__ : Optional[int] = hf_model.generate(
_lowerCAmelCase , _lowerCAmelCase , do_sample=_lowerCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _lowerCAmelCase )
lowercase__ : Optional[Any] = input_ids.shape[1]
lowercase__ : Tuple = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCAmelCase )
lowercase__ : Union[str, Any] = [text.strip() for text in output_text]
print('HF generation:' , _lowerCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
_UpperCamelCase : Dict = argparse.ArgumentParser()
_UpperCamelCase : Any = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
_UpperCamelCase : Tuple = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
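# Example invocation sketch (editorial addition; the script filename is an
# assumption for illustration):
# python convert_blip_2_original_to_pytorch.py \
#     --model_name blip2-opt-2.7b \
#     --pytorch_dump_folder_path ./blip2-opt-2.7b-hf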
| 706
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645
| 0
|
"""simple docstring"""
_UpperCamelCase : int = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
_UpperCamelCase : Optional[Any] = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : str , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Dict = from_type.lower().strip('s' )
lowercase__ : str = to_type.lower().strip('s' )
lowercase__ : Tuple = UNIT_SYMBOL.get(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Optional[Any] = UNIT_SYMBOL.get(_lowerCAmelCase , _lowerCAmelCase )
if from_sanitized not in METRIC_CONVERSION:
lowercase__ : Optional[Any] = (
f"""Invalid 'from_type' value: {from_type!r}.\n"""
f"""Conversion abbreviations are: {", ".join(_lowerCAmelCase )}"""
)
raise ValueError(_lowerCAmelCase )
if to_sanitized not in METRIC_CONVERSION:
lowercase__ : List[str] = (
f"""Invalid 'to_type' value: {to_type!r}.\n"""
f"""Conversion abbreviations are: {", ".join(_lowerCAmelCase )}"""
)
raise ValueError(_lowerCAmelCase )
lowercase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized]
lowercase__ : str = METRIC_CONVERSION[to_sanitized]
lowercase__ : Union[str, Any] = 1
if from_exponent > to_exponent:
lowercase__ : int = from_exponent - to_exponent
else:
lowercase__ : Dict = -(to_exponent - from_exponent)
return value * pow(10 , _lowerCAmelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
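# Worked example (editorial addition): converting 4 meters to kilometers uses
# exponents 0 and 3, giving 4 * 10**(0 - 3) = 0.004. Note that both unit tables
# above are bound to `_UpperCamelCase` (the second shadows the first), so the
# UNIT_SYMBOL / METRIC_CONVERSION lookups inside `a_` only resolve in the
# un-renamed source.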
| 707
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
| 645
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def a_ ( ):
lowercase__ : List[str] = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
lowercase__ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
lowercase__ : List[Any] = parser.parse_args()
if not hasattr(_lowerCAmelCase , 'func' ):
parser.print_help()
exit(1 )
# Run
lowercase__ : str = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
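# Example shell usage sketch (editorial addition; subcommand names follow the
# registrations above, exact arguments are assumptions):
#   transformers-cli env
#   transformers-cli download bert-base-uncased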
| 708
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def a_ ( _lowerCAmelCase : Image ):
'''simple docstring'''
lowercase__ : List[str] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ):
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self , depth_estimator , examples ):
        outputs = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , outputs )
import datasets
        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
        outputs = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
        model_id = 'Intel/dpt-large'
        depth_estimator = pipeline('depth-estimation' , model=model_id )
        outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
        outputs['depth'] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 645
| 0
|
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_DESCRIPTION = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_KWARGS_DESCRIPTION = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_fa(preds , labels , fa_avg="binary" ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds , average=fa_avg ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc(ids_preds , labels ):
    '''simple docstring'''
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = '{}-{}'.format(id_pred['idx']['paragraph'] , id_pred['idx']['question'] )
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels )
        fa = f1_score(y_true=question_labels , y_pred=question_preds , average='macro' )
        fas.append(fa )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    em = float(sum(ems ) / len(ems ) )
    fa_m = sum(fas ) / len(fas )
    fa_a = float(f1_score(y_true=labels , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
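# Illustrative sanity check for evaluate_multirc (toy data, not part of the
# metric itself): two answers to one question, both predicted correctly.
def _demo_evaluate_multirc():
    ids_preds = [
        {'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0},
        {'idx': {'answer': 1, 'paragraph': 0, 'question': 0}, 'prediction': 1},
    ]
    labels = [0, 1]
    # expected output: {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
    print(evaluate_multirc(ids_preds, labels))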
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
    def _info(self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self , predictions , references ):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_fa(predictions , references , fa_avg='macro' )
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 709
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
def __init__( self ) -> Any:
        self.events = []
    def on_init_end(self , args , state , control , **kwargs ):
        self.events.append('on_init_end' )
    def on_train_begin(self , args , state , control , **kwargs ):
        self.events.append('on_train_begin' )
    def on_train_end(self , args , state , control , **kwargs ):
        self.events.append('on_train_end' )
    def on_epoch_begin(self , args , state , control , **kwargs ):
        self.events.append('on_epoch_begin' )
    def on_epoch_end(self , args , state , control , **kwargs ):
        self.events.append('on_epoch_end' )
    def on_step_begin(self , args , state , control , **kwargs ):
        self.events.append('on_step_begin' )
    def on_step_end(self , args , state , control , **kwargs ):
        self.events.append('on_step_end' )
    def on_evaluate(self , args , state , control , **kwargs ):
        self.events.append('on_evaluate' )
    def on_predict(self , args , state , control , **kwargs ):
        self.events.append('on_predict' )
    def on_save(self , args , state , control , **kwargs ):
        self.events.append('on_save' )
    def on_log(self , args , state , control , **kwargs ):
        self.events.append('on_log' )
    def on_prediction_step(self , args , state , control , **kwargs ):
        self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
    def setUp( self ):
        self.output_dir = tempfile.mkdtemp()
    def tearDown( self ):
shutil.rmtree(self.output_dir )
    def get_trainer(self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality(self , cbs1 , cbs2 ):
        self.assertEqual(len(cbs1 ) , len(cbs2 ) )
        # Order doesn't matter
        cbs1 = sorted(cbs1 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        for cba, cbb in zip(cbs1 , cbs2 ):
            if isinstance(cba , type ) and isinstance(cbb , type ):
                self.assertEqual(cba , cbb )
            elif isinstance(cba , type ) and not isinstance(cbb , type ):
                self.assertEqual(cba , cbb.__class__ )
            elif not isinstance(cba , type ) and isinstance(cbb , type ):
                self.assertEqual(cba.__class__ , cbb )
            else:
                self.assertEqual(cba , cbb )
    def get_expected_events(self , trainer ):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append('on_epoch_begin' )
            for _ in range(train_dl_len ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(MyTestTrainerCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True )
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def _UpperCAmelCase ( self ) -> Any:
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback )
        self.assertEqual(cb.__class__ , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(DefaultFlowCallback )
        expected_callbacks.insert(0 , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cba = trainer.callback_handler.callbacks[0]
        cbb = trainer.pop_callback(cba )
        self.assertEqual(cba , cbb )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(cba )
        expected_callbacks.insert(0 , cba )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def _UpperCAmelCase ( self ) -> Tuple:
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore' , category=UserWarning )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
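# --- Illustrative addition (not part of the original tests) ---
# A minimal custom callback in the same style as MyTestTrainerCallback above;
# `LogStepCallback` is a hypothetical name, but the hook signature is the one
# TrainerCallback defines.
class LogStepCallback(TrainerCallback):
    def on_step_end(self, args, state, control, **kwargs):
        # Print the global step at each logging boundary.
        if args.logging_steps and state.global_step % args.logging_steps == 0:
            print(f'step {state.global_step}')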
| 645
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=2 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> List[str]:
lowercase__ : Optional[Any] = parent
lowercase__ : List[Any] = 1_3
lowercase__ : Union[str, Any] = 7
lowercase__ : List[str] = True
lowercase__ : Dict = True
lowercase__ : Tuple = True
lowercase__ : str = True
lowercase__ : Dict = 9_9
lowercase__ : Any = 3_2
lowercase__ : Dict = 2
lowercase__ : int = 4
lowercase__ : Optional[Any] = 3_7
lowercase__ : Any = 'gelu'
lowercase__ : Optional[int] = 0.1
lowercase__ : Optional[int] = 0.1
lowercase__ : Dict = 5_1_2
lowercase__ : Union[str, Any] = 1_6
lowercase__ : Tuple = 2
lowercase__ : Dict = 0.02
lowercase__ : Any = 3
lowercase__ : Optional[int] = 4
lowercase__ : str = None
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Union[str, Any] = None
if self.use_input_mask:
lowercase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Tuple = None
if self.use_token_type_ids:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : int = None
lowercase__ : Any = None
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> Optional[int]:
lowercase__ : str = TFRoFormerModel(config=a )
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ : Union[str, Any] = [input_ids, input_mask]
lowercase__ : Optional[int] = model(a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> Optional[Any]:
lowercase__ : Optional[Any] = True
lowercase__ : Any = TFRoFormerForCausalLM(config=a )
lowercase__ : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowercase__ : Optional[int] = model(a )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> List[str]:
lowercase__ : Tuple = TFRoFormerForMaskedLM(config=a )
lowercase__ : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowercase__ : Any = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> int:
lowercase__ : Optional[Any] = self.num_labels
lowercase__ : Dict = TFRoFormerForSequenceClassification(config=a )
lowercase__ : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowercase__ : int = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> str:
lowercase__ : Optional[int] = self.num_choices
lowercase__ : Tuple = TFRoFormerForMultipleChoice(config=a )
lowercase__ : str = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Optional[Any] = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Tuple = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Union[str, Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> List[str]:
lowercase__ : Any = self.num_labels
lowercase__ : Dict = TFRoFormerForTokenClassification(config=a )
lowercase__ : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowercase__ : int = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = TFRoFormerForQuestionAnswering(config=a )
lowercase__ : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowercase__ : Dict = model(a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
    def setUp( self ):
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
@slow
    def _UpperCAmelCase ( self ) -> List[str]:
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
@slow
    def _UpperCAmelCase ( self ) -> Any:
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def _UpperCAmelCase ( self ) -> List[str]:
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb1 = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb1 , desired_weights , atol=self.tolerance )
    def _UpperCAmelCase ( self ) -> Any:
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def _UpperCAmelCase ( self ) -> Any:
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
        expected_key = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key , atol=self.tolerance )
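# --- Illustrative addition (not part of the original tests) ---
# A minimal numpy sketch of the rotary position embedding exercised above:
# each (even, odd) feature pair is rotated by a position-dependent angle.
# This is a simplified rendering under the usual RoPE layout assumptions,
# not the exact TFRoFormerSelfAttention implementation.
def _demo_rotary(seq_len=4, dim=8):
    import numpy as np

    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = np.arange(seq_len)[:, None] * inv_freq[None, :]  # (seq_len, dim/2)
    sin, cos = np.sin(angles), np.cos(angles)
    x = np.random.randn(seq_len, dim)
    x1, x2 = x[:, 0::2], x[:, 1::2]
    rotated = np.empty_like(x)
    rotated[:, 0::2] = x1 * cos - x2 * sin
    rotated[:, 1::2] = x2 * cos + x1 * sin
    # A 2D rotation preserves norms, which makes a quick invariant to check.
    assert np.allclose(np.linalg.norm(rotated, axis=-1), np.linalg.norm(x, axis=-1))
    return rotated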
| 710
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
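# Illustrative note (not part of the original module): with this lazy pattern,
# `import transformers.models.gpt_neo` stays cheap, and heavy submodules are
# imported only on first attribute access, e.g. (hedged sketch):
#
#     from transformers import GPTNeoConfig   # triggers the lazy import
#     config = GPTNeoConfig(num_layers=4)     # hypothetical usage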
| 645
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ ( _a):
    deprecated_args = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__( self , **kwargs ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    f"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop('tpu_name' , self.tpu_name )
        self.device_idx = kwargs.pop('device_idx' , self.device_idx )
        self.eager_mode = kwargs.pop('eager_mode' , self.eager_mode )
        self.use_xla = kwargs.pop('use_xla' , self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None , metadata={"help": "Name of TPU"} , )
    device_idx: int = field(
        default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
    eager_mode: bool = field(default=False , metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False , metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        } , )
@cached_property
    def _setup_tpu(self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self , ['tf'] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
@cached_property
    def _setup_strategy(self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self , ['tf'] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
            else:
                tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
        return strategy
@property
    def is_tpu(self ) -> bool:
requires_backends(self , ['tf'] )
return self._setup_tpu is not None
@property
    def strategy(self ) -> "tf.distribute.Strategy":
requires_backends(self , ['tf'] )
return self._setup_strategy
@property
    def gpu_list(self ):
requires_backends(self , ['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
    def n_gpu(self ) -> int:
requires_backends(self , ['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
    def is_gpu(self ) -> bool:
return self.n_gpu > 0
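# Illustrative usage (hedged sketch; upstream this class is exported as
# TensorFlowBenchmarkArguments):
#
#     args = TensorFlowBenchmarkArguments(
#         models=['sshleifer/tiny-gpt2'], inference=True,
#         sequence_lengths=[8], batch_sizes=[1], multi_process=False)
#     args.strategy   # resolves a OneDeviceStrategy (or TPUStrategy on TPU)
#     args.n_gpu      # 0 when no GPU is visible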
| 711
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
    def check_results_dict_not_empty(self , results ):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
| 645
| 0
|
"""simple docstring"""
import argparse
import hashlib
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict ):
    '''simple docstring'''
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict ):
    '''simple docstring'''
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
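# Illustrative check with a toy state dict (the key below is a hypothetical
# example of the OpenAI layout that WHISPER_MAPPING expects):
def _demo_rename_keys():
    toy = {'decoder.blocks.0.mlp.0.weight': 0}
    renamed = rename_keys(dict(toy))
    assert 'decoder.layers.0.fc1.weight' in renamed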
def make_linear_from_emb(emb ):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str , root: str = os.path.expanduser('~/.cache/whisper' ) ):
    '''simple docstring'''
    # NOTE: the default cache root is an assumption made here so the
    # one-argument call in convert_openai_whisper_to_tfms below works.
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split('/' )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , 'rb' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(url ) as source, open(download_target , 'wb' ) as output:
        with tqdm(
            total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , 'rb' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    if ".pt" not in checkpoint_path:
        import io  # local import; the byte payload from _download is loaded in memory
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path] ) ) , map_location='cpu' )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]
    # decoder heads come from n_text_head (n_text_state is the hidden width)
    config = WhisperConfig(
        vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , )
    model = WhisperForConditionalGeneration(config )
    missing, unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
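# Example invocation (paths and checkpoint name are illustrative):
#   python convert_openai_whisper_to_tfms.py --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny-en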
| 712
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp( self ):
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
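# Hedged standalone sketch mirroring the integration test above. It downloads the
# public distilbert-base-uncased checkpoint, so it is left commented out; using a
# tokenizer here is an assumption -- the test above feeds raw ids directly.
# import torch
# from transformers import AutoTokenizer, DistilBertModel
# tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
# model = DistilBertModel.from_pretrained('distilbert-base-uncased')
# with torch.no_grad():
#     output = model(**tokenizer('Hello world', return_tensors='pt'))
# print(output.last_hidden_state.shape)  # torch.Size([1, 4, 768])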
| 645
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase : Tuple = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[Any] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
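# Standalone sketch of the same relation (tau = F / A) rearranged for each unknown;
# the numeric values below are assumed examples, not taken from the original file.
def shear_stress_demo(tangential_force: float, area: float) -> float:
    return tangential_force / area

assert shear_stress_demo(tangential_force=100.0, area=25.0) == 4.0  # tau = F / A
assert 8.0 * 200.0 == 1_600.0  # F = tau * A
assert 1_600.0 / 8.0 == 200.0  # A = F / tau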
| 645
| 0
|
"""simple docstring"""
def a_ ( ):
'''simple docstring'''
lowercase__ : Dict = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
lowercase__ : Optional[Any] = 6
lowercase__ : Optional[int] = 1
lowercase__ : Optional[Any] = 1901
lowercase__ : str = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
lowercase__ : Tuple = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
lowercase__ : List[Any] = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
lowercase__ : Optional[Any] = day - days_per_month[month - 2]
if month > 12:
year += 1
lowercase__ : str = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
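# Hedged cross-check of the hand-rolled calendar walk above via the standard library
# (assumption: the target is Sundays falling on the first of the month, 1901-2000).
from datetime import date

def sundays_via_datetime() -> int:
    return sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if date(year, month, 1).weekday() == 6  # weekday(): Monday == 0, Sunday == 6
    )

print(sundays_via_datetime())  # 171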
| 714
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
| 645
| 0
|
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase_ ( _a):
def __init__( self , a , a , a=1_0_2_4 , a=1_0_2_4 , a=3.6 ) -> Any:
lowercase__ : List[Any] = tokenizer
lowercase__ : Dict = tokenizer.bos_token_id
lowercase__ : Optional[Any] = dataset
lowercase__ : str = seq_length
lowercase__ : int = seq_length * chars_per_token * num_of_sequences
def __iter__( self ) -> List[str]:
lowercase__ : List[Any] = iter(self.dataset )
lowercase__ : Tuple = True
while more_examples:
lowercase__ , lowercase__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(a )['content'] )
buffer_len += len(buffer[-1] )
except StopIteration:
lowercase__ : List[Any] = False
break
lowercase__ : Optional[int] = self.tokenizer(a , truncation=a )['input_ids']
lowercase__ : Any = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(a ) , self.seq_length ):
lowercase__ : Optional[int] = all_token_ids[i : i + self.seq_length]
if len(a ) == self.seq_length:
yield torch.tensor(a )
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : int = {'streaming': True}
lowercase__ : List[str] = load_dataset(args.dataset_name , split='train' , **_lowerCAmelCase )
lowercase__ : Any = ConstantLengthDataset(_lowerCAmelCase , _lowerCAmelCase , seq_length=args.seq_length )
lowercase__ : Union[str, Any] = DataLoader(_lowerCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
model.eval()
lowercase__ : List[Any] = []
for step, batch in enumerate(_lowerCAmelCase ):
with torch.no_grad():
lowercase__ : Any = model(_lowerCAmelCase , labels=_lowerCAmelCase )
lowercase__ : List[str] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_lowerCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
lowercase__ : int = torch.mean(torch.cat(_lowerCAmelCase ) )
try:
lowercase__ : Optional[Any] = torch.exp(_lowerCAmelCase )
except OverflowError:
lowercase__ : Tuple = float('inf' )
return loss.item(), perplexity.item()
# Setup Accelerator
_UpperCamelCase : Optional[int] = Accelerator()
# Parse configuration
_UpperCamelCase : int = HfArgumentParser(EvaluationArguments)
_UpperCamelCase : Dict = parser.parse_args()
set_seed(args.seed)
# Logging
_UpperCamelCase : List[str] = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
_UpperCamelCase : Dict = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_UpperCamelCase : Union[str, Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_UpperCamelCase : int = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
_UpperCamelCase : Optional[int] = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
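# Side note, as a tiny standalone sketch: the perplexity logged above is just
# exp(mean cross-entropy loss), with the OverflowError guard mapping huge losses to inf.
import torch

mean_loss = torch.tensor(2.0)  # assumed example value
print(torch.exp(mean_loss).item())  # ~7.389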
| 715
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple:
lowercase__ : str = load_in_abit
lowercase__ : str = load_in_abit
lowercase__ : List[str] = llm_inta_threshold
lowercase__ : Dict = llm_inta_skip_modules
lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowercase__ : Any = llm_inta_has_fpaa_weight
lowercase__ : Any = bnb_abit_quant_type
lowercase__ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__ : Dict = torch.floataa
elif isinstance(a , a ):
lowercase__ : Any = getattr(a , a )
elif isinstance(a , torch.dtype ):
lowercase__ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def _UpperCAmelCase ( self ) -> str:
if not isinstance(self.llm_inta_threshold , a ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , a ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , a ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , a ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def _UpperCAmelCase ( self ) -> Tuple:
return self.load_in_abit or self.load_in_abit
def _UpperCAmelCase ( self ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]:
lowercase__ : List[Any] = cls(**a )
lowercase__ : Union[str, Any] = []
for key, value in kwargs.items():
if hasattr(a , a ):
setattr(a , a , a )
to_remove.append(a )
for key in to_remove:
kwargs.pop(a , a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCAmelCase ( self , a ) -> Dict:
with open(a , 'w' , encoding='utf-8' ) as writer:
lowercase__ : Any = self.to_dict()
lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n'
writer.write(a )
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def _UpperCAmelCase ( self , a = True ) -> str:
if use_diff is True:
lowercase__ : List[Any] = self.to_diff_dict()
else:
lowercase__ : List[str] = self.to_dict()
return json.dumps(a , indent=2 , sort_keys=a ) + "\n"
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Tuple = self.to_dict()
# get the default config dict
lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict()
lowercase__ : int = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__ : Optional[int] = value
return serializable_config_dict
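# Hedged usage sketch, assuming the upstream `transformers` BitsAndBytesConfig API
# that the class above mirrors (note: the post_init check above requires
# bitsandbytes >= 0.39.0 once 4-bit loading is enabled), hence commented out.
# import torch
# from transformers import BitsAndBytesConfig
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type='nf4',
#     bnb_4bit_compute_dtype=torch.bfloat16,
#     bnb_4bit_use_double_quant=True,
# )
# print(bnb_config.to_json_string())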
| 645
| 0
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
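# Illustrative invocation (the script filename and all paths are placeholders,
# not taken from the original file):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --dict_path /path/to/fairseq/dict \
#       --pytorch_dump_folder_path ./unispeech-sat-hf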
| 716
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[int] = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : int = int(config['batch_size'] )
lowercase__ : Union[str, Any] = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[int] = 0
# We also need to keep track of the starting epoch so files are named properly
lowercase__ : Tuple = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : List[Any] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , )
lowercase__ : Any = parser.parse_args()
lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
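# Minimal sketch of the same context-manager measurement pattern as TorchTracemalloc,
# but CPU-only so it runs anywhere (the original tracks CUDA allocations; swapping
# in wall-clock time here is an assumption for illustration).
import time

class TimerDemo:
    def __enter__(self):
        self.begin = time.perf_counter()
        return self

    def __exit__(self, *exc):
        self.elapsed = time.perf_counter() - self.begin

with TimerDemo() as t:
    sum(range(1_000_000))
print(f'elapsed: {t.elapsed:.4f}s')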
| 645
| 0
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
for param in module.parameters():
lowercase__ : List[str] = False
def a_ ( ):
'''simple docstring'''
lowercase__ : Dict = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ : List[str] = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : Dict = plt.imshow(_lowerCAmelCase )
fig.axes.get_xaxis().set_visible(_lowerCAmelCase )
fig.axes.get_yaxis().set_visible(_lowerCAmelCase )
plt.show()
def a_ ( ):
'''simple docstring'''
lowercase__ : List[Any] = datetime.now()
lowercase__ : List[Any] = current_time.strftime('%H:%M:%S' )
return timestamp
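# Standalone check of the parameter-freezing pattern at the top of this file
# (the tiny Linear module is an assumed example).
import torch.nn as nn

layer = nn.Linear(4, 2)
for param in layer.parameters():
    param.requires_grad = False
assert all(not p.requires_grad for p in layer.parameters())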
| 717
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = [0] * len(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
lowercase__ : List[str] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase__ : Dict = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase__ : Union[str, Any] = j
return prefix_result
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
return max(prefix_function(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
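# Worked example as a standalone restatement of the KMP prefix function above
# (the sample string is an illustrative assumption): pi[i] is the length of the
# longest proper prefix of s[:i + 1] that is also a suffix of it.
def prefix_function_demo(s: str) -> list[int]:
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi

assert prefix_function_demo('aabaaab') == [0, 1, 0, 1, 2, 2, 3]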
| 645
| 0
|
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class UpperCAmelCase_ ( nn.Module):
def __init__( self ) -> Dict:
super().__init__()
lowercase__ : Optional[Any] = nn.Linear(3 , 4 )
lowercase__ : Optional[int] = nn.BatchNormad(4 )
lowercase__ : List[str] = nn.Linear(4 , 5 )
def _UpperCAmelCase ( self , a ) -> Dict:
return self.lineara(self.batchnorm(self.lineara(a ) ) )
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a , model.state_dict() )
lowercase__ : Optional[int] = os.path.join(a , 'index.json' )
self.assertTrue(os.path.isfile(a ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
lowercase__ : Any = os.path.join(a , f"""{key}.dat""" )
self.assertTrue(os.path.isfile(a ) )
# TODO: add tests on the fact weights are properly loaded
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : List[str] = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
lowercase__ : Optional[Any] = torch.randn(2 , 3 , dtype=a )
with TemporaryDirectory() as tmp_dir:
lowercase__ : Any = offload_weight(a , 'weight' , a , {} )
lowercase__ : Any = os.path.join(a , 'weight.dat' )
self.assertTrue(os.path.isfile(a ) )
self.assertDictEqual(a , {'weight': {'shape': [2, 3], 'dtype': str(a ).split('.' )[1]}} )
lowercase__ : Optional[int] = load_offloaded_weight(a , index['weight'] )
self.assertTrue(torch.equal(a , a ) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = ModelForTest()
lowercase__ : List[Any] = model.state_dict()
lowercase__ : List[str] = {k: v for k, v in state_dict.items() if 'linear2' not in k}
lowercase__ : str = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a , a )
lowercase__ : Tuple = OffloadedWeightsLoader(state_dict=a , save_folder=a )
# Every key is there with the right value
self.assertEqual(sorted(a ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(a , weight_map[key] ) )
lowercase__ : Optional[Any] = {k: v for k, v in state_dict.items() if 'weight' in k}
lowercase__ : str = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a , a )
lowercase__ : List[Any] = OffloadedWeightsLoader(state_dict=a , save_folder=a )
# Every key is there with the right value
self.assertEqual(sorted(a ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(a , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a , a )
# Duplicates are removed
lowercase__ : Union[str, Any] = OffloadedWeightsLoader(state_dict=a , save_folder=a )
# Every key is there with the right value
self.assertEqual(sorted(a ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(a , weight_map[key] ) )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = {'a.1': 0, 'a.10': 1, 'a.2': 2}
lowercase__ : List[str] = extract_submodules_state_dict(a , ['a.1', 'a.2'] )
self.assertDictEqual(a , {'a.1': 0, 'a.2': 2} )
lowercase__ : int = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
lowercase__ : Tuple = extract_submodules_state_dict(a , ['a.1', 'a.2'] )
self.assertDictEqual(a , {'a.1.a': 0, 'a.2.a': 2} )
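# Hedged round-trip sketch of the offload utilities exercised above (assumption:
# OffloadedWeightsLoader can be built from the save folder alone, picking up the
# index.json written by offload_state_dict).
import torch
from tempfile import TemporaryDirectory
from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

state_dict = {'w': torch.randn(2, 3)}
with TemporaryDirectory() as tmp_dir:
    offload_state_dict(tmp_dir, state_dict)
    loader = OffloadedWeightsLoader(save_folder=tmp_dir)
    assert torch.equal(loader['w'], state_dict['w'])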
| 718
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
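# Hedged usage sketch of the processor under test; the checkpoint name is an
# assumption and the call downloads its preprocessor config, hence commented out.
# from PIL import Image
# from transformers import MobileViTImageProcessor
# processor = MobileViTImageProcessor.from_pretrained('apple/mobilevit-small')
# pixel_values = processor(images=Image.new('RGB', (64, 64)), return_tensors='pt').pixel_values
# print(pixel_values.shape)  # (1, 3, crop_height, crop_width)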
| 645
| 0
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
if not isinstance(_lowerCAmelCase , int ):
lowercase__ : Any = f"""Input value of [number={number}] must be an integer"""
raise TypeError(_lowerCAmelCase )
if number < 1:
lowercase__ : List[Any] = f"""Input value of [number={number}] must be > 0"""
raise ValueError(_lowerCAmelCase )
lowercase__ : List[Any] = 1
for i in range(1 , _lowerCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
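# Standalone cross-check: the loop above applies the Catalan recurrence
# C(n) = C(n - 1) * (4n - 2) // (n + 1); the binomial closed form for comparison.
from math import comb

def catalan(n: int) -> int:
    return comb(2 * n, n) // (n + 1)

assert [catalan(i) for i in range(6)] == [1, 1, 2, 5, 14, 42]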
| 719
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
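        # Added note: only a 3x3 slice of the (1, 11, 768) hidden-state tensor is
        # compared against recorded reference values at atol=1e-4, which keeps this
        # slow integration test stable under minor numerical drift.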
| 645
| 0
|
"""simple docstring"""
def gnome_sort( lst : list ):
    '''simple docstring'''
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1] , lst[i] = lst[i] , lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
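# Added note: gnome sort steps back one position after each swap instead of
# restarting from the front, giving O(n^2) comparisons in the worst case but
# O(1) extra space; the `<=` test leaves equal elements in their original
# order, so the sort is stable. Example: gnome_sort([3, 1, 2]) -> [1, 2, 3].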
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 720
|
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly( poly : Sequence[float] , x : float ):
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner( poly : Sequence[float] , x : float ):
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
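# Added cross-check sketch (hypothetical helper, not part of the original
# module): Horner's rule factors c0 + c1*x + c2*x**2 + ... as
# c0 + x*(c1 + x*(c2 + ...)), so both evaluators must agree on any input while
# horner performs only one multiply per coefficient.
def _check_agreement( poly : Sequence[float] , x : float ) -> bool:
    return abs(evaluate_poly(poly , x ) - horner(poly , x ) ) < 1e-9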
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 645
| 0
|
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln( nums : list[int] , max_sum : int ):
    '''simple docstring'''
    result : list[list[int]] = []
    path : list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree( nums : list[int] , max_sum : int , num_index : int , path : list[int] , result : list[list[int]] , remaining_nums_sum : int , ):
    '''simple docstring'''
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
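# Added note: the first guard in create_state_space_tree prunes in both
# directions -- a branch is abandoned once the partial sum exceeds max_sum, or
# once even taking every remaining number could no longer reach it. For the
# demo below (nums = [3, 34, 4, 12, 5, 2], max_sum = 9) the printed solutions
# are [3, 4, 2] and [4, 5].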
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 721
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a )
lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a )
self.assertTrue(os.path.exists(a ) )
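# Added note: the parameterized test above only verifies that dataset_info.json
# for each (dataset, config) pair is reachable under HF_GCP_BASE_URL at the
# builder's relative data dir, i.e. that a preprocessed copy of the dataset
# exists on the Hugging Face GCS bucket.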
@pytest.mark.integration
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[int] = None
builder_instance.download_and_prepare()
lowercase__ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['train'] , _lowerCAmelCase )
assert next(iter(ds['train'] ) )
| 645
| 0
|
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log( folder_path : str ):
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , 'git_log.json' ) , 'w' ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params( params ):
    '''simple docstring'''
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs' )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'] )
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'] )
        params.global_rank = int(os.environ['RANK'] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'] )
        assert params.node_id == int(os.environ['NODE_RANK'] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def set_seed( args ):
    '''simple docstring'''
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
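# Illustrative launch sketch (assumptions, not part of this module): the
# multi-GPU branch of init_gpu_params above expects the distributed launcher to
# export WORLD_SIZE, N_GPU_NODE, N_NODES, NODE_RANK and RANK, e.g.
#   WORLD_SIZE=8 N_GPU_NODE=4 N_NODES=2 NODE_RANK=0 RANK=0 python train.py --local_rank 0
# (script name and flag are placeholders).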
| 700
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling( data : dict ):
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost( features : np.ndarray , target : np.ndarray , test_features : np.ndarray ):
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main( ):
    '''simple docstring'''
    california = fetch_california_housing()
    data , target = data_handling(california )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test , predictions )}""" )
    print(f"""Mean Square Error : {mean_squared_error(y_test , predictions )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 645
| 0
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
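        # Added note: with the defaults above (image_size=10, patch_size=2,
        # num_frames=2, tubelet_size=2) there are (10 // 2) ** 2 = 25 patches per
        # frame and (2 // 2) * 25 = 25 tokens in total, so mask_ratio=0.9 masks
        # int(0.9 * 25) = 22 of them.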
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]:
lowercase__ : Union[str, Any] = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase__ : Union[str, Any] = bool_masked_pos.to(a )
if return_labels:
if model_class in [
*get_values(a ),
]:
lowercase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def prepare_video( ):
    '''simple docstring'''
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
        lowercase__ : List[Any] = torch.tensor([0.6_469] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
| 645
| 0
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess( image , w , h ):
    '''simple docstring'''
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
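# Added shape note: a single RGB PIL image resized to (w, h) leaves preprocess()
# as a (1, 3, h, w) float tensor scaled to [-1, 1], the input range the VAE
# encoder in this pipeline expects.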
def slerp( t , v0 , v1 , DOT_THRESHOLD=0.9_995 ):
    '''simple docstring'''
    inputs_are_torch = False
    if not isinstance(v0 , np.ndarray ):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0 ) * np.linalg.norm(v1 )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        result = (1 - t) * v0 + t * v1
    else:
        theta_a = np.arccos(dot )
        sin_theta_a = np.sin(theta_a )
        theta_t = theta_a * t
        sin_theta_t = np.sin(theta_t )
        sa = np.sin(theta_a - theta_t ) / sin_theta_a
        sb = sin_theta_t / sin_theta_a
        result = sa * v0 + sb * v1
    if inputs_are_torch:
        result = torch.from_numpy(result ).to(input_device )
    return result
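# Added illustration: when the inputs are nearly parallel the DOT_THRESHOLD
# branch falls back to plain linear interpolation; for orthogonal unit vectors,
# slerp(0.5, v0, v1) returns (v0 + v1) / sqrt(2), the midpoint along the great
# circle rather than the shorter chord.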
def spherical_dist_loss( x , y ):
    '''simple docstring'''
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
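# Added note: after normalization, ||x - y|| = 2 * sin(theta / 2) for the angle
# theta between the embeddings, so arcsin(||x - y|| / 2) recovers theta / 2 and
# the loss grows with the squared great-circle distance between them.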
def set_requires_grad( model , value ):
    '''simple docstring'''
    for param in model.parameters():
        param.requires_grad = value
class UpperCAmelCase_ ( _a):
def __init__( self , a , a , a , a , a , a , a , a=None , a=None , a=None , ) -> Any:
super().__init__()
self.register_modules(
vae=a , text_encoder=a , clip_model=a , tokenizer=a , unet=a , scheduler=a , feature_extractor=a , coca_model=a , coca_tokenizer=a , coca_transform=a , )
lowercase__ : Tuple = (
feature_extractor.size
if isinstance(feature_extractor.size , a )
else feature_extractor.size['shortest_edge']
)
lowercase__ : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , a )
set_requires_grad(self.clip_model , a )
def _UpperCAmelCase ( self , a = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a )
def _UpperCAmelCase ( self ) -> Dict:
self.enable_attention_slicing(a )
def _UpperCAmelCase ( self ) -> Any:
set_requires_grad(self.vae , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
set_requires_grad(self.vae , a )
def _UpperCAmelCase ( self ) -> int:
set_requires_grad(self.unet , a )
def _UpperCAmelCase ( self ) -> Any:
set_requires_grad(self.unet , a )
def _UpperCAmelCase ( self , a , a , a ) -> int:
# get the original timestep using init_timestep
lowercase__ : int = min(int(num_inference_steps * strength ) , a )
lowercase__ : Optional[Any] = max(num_inference_steps - init_timestep , 0 )
lowercase__ : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
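    # Added note: with strength=0.6 and num_inference_steps=50, init_timestep is
    # 30 and t_start is 20, so denoising runs over the final 30 scheduler
    # timesteps -- higher strength injects more noise and regenerates more of the
    # input image.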
def _UpperCAmelCase ( self , a , a , a , a , a , a=None ) -> Tuple:
if not isinstance(a , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(a )}""" )
lowercase__ : str = image.to(device=a , dtype=a )
if isinstance(a , a ):
lowercase__ : Optional[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase__ : Tuple = torch.cat(a , dim=0 )
else:
lowercase__ : Any = self.vae.encode(a ).latent_dist.sample(a )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowercase__ : int = 0.18_215 * init_latents
lowercase__ : int = init_latents.repeat_interleave(a , dim=0 )
lowercase__ : Dict = randn_tensor(init_latents.shape , generator=a , device=a , dtype=a )
# get latents
lowercase__ : int = self.scheduler.add_noise(a , a , a )
lowercase__ : List[Any] = init_latents
return latents
def _UpperCAmelCase ( self , a ) -> List[str]:
lowercase__ : List[Any] = self.coca_transform(a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowercase__ : Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
lowercase__ : Optional[int] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
lowercase__ : int = self.feature_extractor.preprocess(a )
lowercase__ : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
lowercase__ : List[str] = self.clip_model.get_image_features(a )
lowercase__ : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
lowercase__ : Optional[Any] = image_embeddings_clip.repeat_interleave(a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _UpperCAmelCase ( self , a , a , a , a , a , a , a , ) -> str:
lowercase__ : Tuple = latents.detach().requires_grad_()
lowercase__ : Dict = self.scheduler.scale_model_input(a , a )
# predict the noise residual
lowercase__ : int = self.unet(a , a , encoder_hidden_states=a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowercase__ : int = self.scheduler.alphas_cumprod[timestep]
lowercase__ : Tuple = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase__ : Any = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowercase__ : List[str] = torch.sqrt(a )
lowercase__ : Any = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , a ):
lowercase__ : Dict = self.scheduler.sigmas[index]
lowercase__ : List[str] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowercase__ : str = 1 / 0.18_215 * sample
lowercase__ : int = self.vae.decode(a ).sample
lowercase__ : int = (image / 2 + 0.5).clamp(0 , 1 )
lowercase__ : Dict = transforms.Resize(self.feature_extractor_size )(a )
lowercase__ : Optional[int] = self.normalize(a ).to(latents.dtype )
lowercase__ : str = self.clip_model.get_image_features(a )
lowercase__ : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
lowercase__ : Dict = spherical_dist_loss(a , a ).mean() * clip_guidance_scale
lowercase__ : Tuple = -torch.autograd.grad(a , a )[0]
if isinstance(self.scheduler , a ):
lowercase__ : int = latents.detach() + grads * (sigma**2)
lowercase__ : Optional[Any] = noise_pred_original
else:
lowercase__ : List[Any] = noise_pred_original - torch.sqrt(a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , a , a , a = None , a = None , a = 5_1_2 , a = 5_1_2 , a = 0.6 , a = 5_0 , a = 7.5 , a = 1 , a = 0.0 , a = 1_0_0 , a = None , a = "pil" , a = True , a = 0.8 , a = 0.1 , a = 0.1 , ) -> Dict:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(a )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(a , torch.Generator ) and batch_size > 1:
lowercase__ : Optional[int] = [generator] + [None] * (batch_size - 1)
lowercase__ : Tuple = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
lowercase__ : Optional[Any] = [x[0] for x in coca_is_none if x[1]]
lowercase__ : Dict = ', '.join(a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(a ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
lowercase__ : int = self.get_image_description(a )
if style_prompt is None:
if len(a ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
lowercase__ : List[str] = self.get_image_description(a )
# get prompt text embeddings for content and style
lowercase__ : Any = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
lowercase__ : Optional[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowercase__ : Tuple = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
lowercase__ : Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowercase__ : Union[str, Any] = slerp(a , a , a )
# duplicate text embeddings for each generation per prompt
lowercase__ : Any = text_embeddings.repeat_interleave(a , dim=0 )
# set timesteps
lowercase__ : List[str] = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowercase__ : Optional[Any] = {}
if accepts_offset:
lowercase__ : Dict = 1
self.scheduler.set_timesteps(a , **a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowercase__ : Optional[int] = self.get_timesteps(a , a , self.device )
lowercase__ : List[Any] = timesteps[:1].repeat(a )
# Preprocess image
lowercase__ : Tuple = preprocess(a , a , a )
lowercase__ : int = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
lowercase__ : List[Any] = preprocess(a , a , a )
lowercase__ : Union[str, Any] = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
lowercase__ : str = slerp(a , a , a )
if clip_guidance_scale > 0:
lowercase__ : List[str] = self.get_clip_image_embeddings(a , a )
lowercase__ : Any = self.get_clip_image_embeddings(a , a )
lowercase__ : Dict = slerp(
a , a , a )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ : Tuple = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ : Optional[int] = content_text_input.input_ids.shape[-1]
lowercase__ : Optional[Any] = self.tokenizer([''] , padding='max_length' , max_length=a , return_tensors='pt' )
lowercase__ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowercase__ : Optional[int] = uncond_embeddings.repeat_interleave(a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowercase__ : Tuple = torch.randn(a , generator=a , device='cpu' , dtype=a ).to(
self.device )
else:
lowercase__ : Any = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ : Tuple = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ : Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ : Optional[int] = {}
if accepts_eta:
lowercase__ : str = eta
# check if the scheduler accepts generator
lowercase__ : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowercase__ : str = generator
with self.progress_bar(total=a ):
for i, t in enumerate(a ):
# expand the latents if we are doing classifier free guidance
lowercase__ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ : Dict = self.scheduler.scale_model_input(a , a )
# predict the noise residual
lowercase__ : List[Any] = self.unet(a , a , encoder_hidden_states=a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowercase__ : List[str] = noise_pred.chunk(2 )
lowercase__ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowercase__ : int = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowercase__ : int = self.cond_fn(
a , a , a , a , a , a , a , )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : Optional[Any] = self.scheduler.step(a , a , a , **a ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowercase__ : int = 1 / 0.18_215 * latents
lowercase__ : List[Any] = self.vae.decode(a ).sample
lowercase__ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
lowercase__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
| 702
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 645
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_UpperCamelCase : Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
def __init__( self , *a , **a ) -> None:
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
super().__init__(*a , **a )
| 703
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError is raised when an unsupported stage is requested
        with self.parent.assertRaises(ValueError):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
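
# --- Editor's addition: shape arithmetic behind create_and_check_model ---------
# With the tester defaults (image_size=32, patch_size=2, depths=[1, 2, 1],
# embed_dim=16): 256 patches, two 2x2 patch merges, channel dim doubled twice.
_num_patches = (32 // 2) ** 2                       # 256
_expected_seq_len = _num_patches // (4 ** (3 - 1))  # 256 // 16 = 16
_expected_dim = int(16 * 2 ** (3 - 1))              # 16 * 4 = 64
assert (_expected_seq_len, _expected_dim) == (16, 64)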
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0  # NaN != NaN, so this zeroes out NaN entries in place
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=(
                            'Tuple and dict output are not equal. Difference:'
                            f""" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"""
                            f""" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"""
                            f""" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."""
                        ), )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['pixel_values'].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
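
if __name__ == "__main__":
    # --- Editor's addition: usage sketch (hedged) ---------------------------
    # Consuming backbone feature maps directly, with the tester defaults above.
    config = MaskFormerSwinConfig(
        image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2,
        out_features=['stage1', 'stage2', 'stage3'],
    )
    backbone = MaskFormerSwinBackbone(config).eval()
    with torch.no_grad():
        feature_maps = backbone(torch.randn(1, 3, 32, 32)).feature_maps
    for fmap, n_channels in zip(feature_maps, backbone.channels):
        assert fmap.shape[1] == n_channels  # one map per requested stage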
| 645
| 0
|
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        if resistor < 0:
            msg = f"""Resistor at index {index} has a negative value!"""
            raise ValueError(msg)
        sum_r += resistor
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
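
# --- Editor's addition: worked example -----------------------------------------
# Two resistors of 6 and 3 ohms: series adds to 9; parallel gives
# 1 / (1/6 + 1/3) = 2.
assert resistor_series([6, 3]) == 9
assert abs(resistor_parallel([6, 3]) - 2.0) < 1e-9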
| 704
|
"""simple docstring"""
import math


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
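
# --- Editor's addition: closed-form cross-check ---------------------------------
# sum(1..n) = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6, so the result can
# be verified without loops; for n = 100 the answer is 25164150.
_n = 100
assert solution(_n) == (_n * (_n + 1) // 2) ** 2 - _n * (_n + 1) * (2 * _n + 1) // 6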
| 645
| 0
|
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the leftmost insertion point for ``item`` in ``sorted_collection``."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the rightmost insertion point for ``item`` in ``sorted_collection``."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert ``item`` before any existing entries of the same value."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert ``item`` after any existing entries of the same value."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of ``item`` or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search via the standard library's bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over sorted_collection[left:right + 1]."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''')
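
# --- Editor's addition: quick self-check of the functions above ------------------
_data = [0, 5, 7, 10, 15]
assert binary_search(_data, 7) == 2
assert binary_search_std_lib(_data, 6) is None
assert binary_search_by_recursion(_data, 15, 0, len(_data) - 1) == 4
insort_left(_data, 6)
assert _data == [0, 5, 6, 7, 10, 15]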
| 705
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny', from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params['controlnet'] = controlnet_params

        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png')
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose', from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params['controlnet'] = controlnet_params

        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png')
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
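
if __name__ == "__main__":
    # --- Editor's addition: pattern sketch (hedged) --------------------------
    # The replicate/shard/jit recipe above is generic pmap plumbing: params are
    # copied to every device, inputs get a leading device axis, and per-device
    # RNG keys come from jax.random.split. A toy version with a plain function:
    params = {'w': jnp.ones((3,))}
    inputs = jnp.arange(jax.device_count() * 2 * 3, dtype=jnp.float32).reshape(-1, 3)

    p_params = replicate(params)   # leading axis = device count
    p_inputs = shard(inputs)       # (devices, batch_per_device, 3)

    @jax.pmap
    def apply(params, x):
        return x @ params['w']

    print(apply(p_params, p_inputs).shape)  # (devices, batch_per_device)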
| 645
| 0
|
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def a_ ( ):
'''simple docstring'''
lowercase__ : int = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , bootstrap_aggregation=_lowerCAmelCase , rouge_keys=['rouge2', 'rougeL'] )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Optional[Any] = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , bootstrap_aggregation=_lowerCAmelCase , rouge_keys=['rouge2'] )
assert (
pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean()
)
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = 'rougeLsum'
lowercase__ : int = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase , rouge_keys=[k] )[k]
lowercase__ : Tuple = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase , rouge_keys=[k] )[k]
assert score > score_no_sep
def a_ ( ):
'''simple docstring'''
lowercase__ : int = ['rouge1', 'rouge2', 'rougeL']
lowercase__ : str = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase , rouge_keys=_lowerCAmelCase )
lowercase__ : Optional[Any] = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase , rouge_keys=_lowerCAmelCase )
assert score_sep == score_no_sep
def a_ ( ):
'''simple docstring'''
lowercase__ : List[str] = [
'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
]
lowercase__ : Optional[int] = [
'Margot Frank, died in 1945, a month earlier than previously thought.',
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
' the final seconds on board Flight 9525.',
]
assert calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : Tuple = [
'" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
]
lowercase__ : Optional[Any] = [
' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
]
lowercase__ : Tuple = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , rouge_keys=['rougeLsum'] , newline_sep=_lowerCAmelCase )['rougeLsum']
lowercase__ : List[str] = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , rouge_keys=['rougeLsum'] )['rougeLsum']
assert new_score > prev_score
def a_ ( ):
'''simple docstring'''
lowercase__ : str = Path('examples/seq2seq/test_data/wmt_en_ro' )
lowercase__ : Any = calculate_rouge_path(data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Tuple = calculate_rouge_path(
data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) , bootstrap_aggregation=_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
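
if __name__ == "__main__":
    # --- Editor's addition: usage sketch (hedged) ----------------------------
    # With default bootstrap aggregation, calculate_rouge returns one mean
    # F-measure per requested key; exact values depend on the rouge_score lib.
    print(calculate_rouge(PRED, TGT, rouge_keys=['rouge1', 'rouge2', 'rougeL']))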
| 706
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
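
if __name__ == "__main__":
    # --- Editor's addition: tubelet arithmetic sketch -------------------------
    # With the defaults above, a 32-frame 224x224 clip is cut into
    # (32 // 2) * (224 // 16) * (224 // 16) = 16 * 14 * 14 = 3136 tubelet
    # tokens (plus a [CLS] token inside the model).
    config = VivitConfig()
    t, h, w = config.tubelet_size
    num_tokens = (config.num_frames // t) * (config.image_size // h) * (config.image_size // w)
    print(num_tokens)  # 3136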
| 707
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset('nielsr/rvlcdip-demo')

        image = dataset['train'][0]['image'].convert('RGB')

        inputs = image_processor(image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 645
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 5_12,
"xlm-roberta-large": 5_12,
"xlm-roberta-large-finetuned-conll02-dutch": 5_12,
"xlm-roberta-large-finetuned-conll02-spanish": 5_12,
"xlm-roberta-large-finetuned-conll03-english": 5_12,
"xlm-roberta-large-finetuned-conll03-german": 5_12,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , a , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , a = None , **a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
lowercase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a ) )
lowercase__ : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase__ : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ : Optional[int] = 1
lowercase__ : List[str] = len(self.sp_model ) + self.fairseq_offset
lowercase__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (pieces) into a single string."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
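
if __name__ == "__main__":
    # --- Editor's addition: fairseq/spm offset illustration -------------------
    # Ids 0-3 are the remapped fairseq specials, and every genuine sentencepiece
    # id is shifted up by fairseq_offset (1), mirroring _convert_token_to_id.
    fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
    fairseq_offset = 1
    toy_spm_vocab = {',': 3}  # "," has spm id 3 -> fairseq id 4 (see table above)

    def to_fairseq_id(token):
        if token in fairseq_tokens_to_ids:
            return fairseq_tokens_to_ids[token]
        spm_id = toy_spm_vocab.get(token, 0)
        return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids['<unk>']

    assert to_fairseq_id(',') == 4
    assert to_fairseq_id('<pad>') == 1
    assert to_fairseq_id('never-seen') == 3  # maps to <unk>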
| 708
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    """Return an md5 fingerprint of the raw image bytes, used to compare outputs."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
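
if __name__ == "__main__":
    # --- Editor's addition: end-user sketch (hedged) --------------------------
    # Mirrors the slow test above: the pipeline returns the raw predicted_depth
    # tensor plus a normalized PIL depth image.
    depth_estimator = pipeline('depth-estimation', model='Intel/dpt-large')
    out = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
    print(out['predicted_depth'].shape)  # torch.Size([1, H, W])
    out['depth'].save('depth.png')       # grayscale visualization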
| 645
| 0
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> int:
lowercase__ : str = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(a )
lowercase__ : List[Any] = -1
lowercase__ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(a )
lowercase__ : int = model.generate(a , max_new_tokens=1_0 , do_sample=a )
lowercase__ : List[str] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : Optional[int] = TextStreamer(a )
model.generate(a , max_new_tokens=1_0 , do_sample=a , streamer=a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : List[str] = cs.out[:-1]
self.assertEqual(a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowercase__ : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(a )
lowercase__ : List[str] = -1
lowercase__ : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(a )
lowercase__ : int = model.generate(a , max_new_tokens=1_0 , do_sample=a )
lowercase__ : List[str] = tokenizer.decode(greedy_ids[0] )
lowercase__ : List[str] = TextIteratorStreamer(a )
lowercase__ : List[Any] = {'input_ids': input_ids, 'max_new_tokens': 1_0, 'do_sample': False, 'streamer': streamer}
lowercase__ : Any = Thread(target=model.generate , kwargs=a )
thread.start()
lowercase__ : Any = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(a , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(a )
lowercase__ : Dict = -1
lowercase__ : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(a )
lowercase__ : Any = model.generate(a , max_new_tokens=1_0 , do_sample=a )
lowercase__ : Dict = greedy_ids[:, input_ids.shape[1] :]
lowercase__ : Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : Dict = TextStreamer(a , skip_prompt=a )
model.generate(a , max_new_tokens=1_0 , do_sample=a , streamer=a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : List[str] = cs.out[:-1]
self.assertEqual(a , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('distilgpt2' )
lowercase__ : List[Any] = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(a )
lowercase__ : Optional[Any] = -1
lowercase__ : List[Any] = torch.ones((1, 5) , device=a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(a , skip_special_tokens=a )
model.generate(a , max_new_tokens=1 , do_sample=a , streamer=a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase__ : Optional[int] = cs.out[:-1] # Remove the final "\n"
lowercase__ : Optional[Any] = tokenizer(a , return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowercase__ : Optional[int] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(a )
lowercase__ : List[str] = -1
lowercase__ : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(a )
lowercase__ : List[Any] = TextIteratorStreamer(a , timeout=0.001 )
lowercase__ : Dict = {'input_ids': input_ids, 'max_new_tokens': 1_0, 'do_sample': False, 'streamer': streamer}
lowercase__ : Tuple = Thread(target=model.generate , kwargs=a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(a ):
lowercase__ : str = ''
for new_text in streamer:
streamer_text += new_text
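
if __name__ == "__main__":
    # --- Editor's addition: minimal streaming sketch --------------------------
    # The iterator-streamer pattern exercised above in its end-user form:
    # generation runs in a background thread while the main thread consumes text.
    tok = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
    model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
    streamer = TextIteratorStreamer(tok)
    inputs = tok('Hello', return_tensors='pt')
    Thread(target=model.generate, kwargs={**inputs, 'max_new_tokens': 10, 'streamer': streamer}).start()
    for chunk in streamer:
        print(chunk, end='')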
| 709
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('on_init_end')

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('on_train_begin')

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('on_train_end')

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('on_epoch_begin')

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('on_epoch_end')

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('on_step_begin')

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('on_step_end')

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('on_evaluate')

    def on_predict(self, args, state, control, **kwargs):
        self.events.append('on_predict')

    def on_save(self, args, state, control, **kwargs):
        self.events.append('on_save')

    def on_log(self, args, state, control, **kwargs):
        self.events.append('on_log')

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('on_prediction_step')
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)

        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks, )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader()) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('on_epoch_begin')
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log')
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save')
            expected_events.append('on_epoch_end')
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
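
# --- Editor's addition: minimal custom callback sketch ---------------------------
# The event-recording callback above implements every hook; a real callback
# usually overrides only what it needs, e.g. printing the loss on each log:
class LossPrinterCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and 'loss' in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# usage: Trainer(model, args, ..., callbacks=[LossPrinterCallback()])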
| 645
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."}, )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        }, )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        }, )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"})
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"})
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path, decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path, encoder_config=encoder_config, decoder_config=decoder_config, )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
| 710
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
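
if __name__ == "__main__":
    # --- Editor's addition: lazy-import illustration (hedged) -----------------
    # _LazyModule defers the heavy torch/flax imports until a symbol is first
    # accessed, keeping `import transformers` cheap.
    from transformers.models import gpt_neo as lazy_gpt_neo

    print(lazy_gpt_neo.GPTNeoConfig.model_type)  # first access triggers the real import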
| 645
| 0
|
"""simple docstring"""
def factorial(num: int) -> int:
    """Return num! computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of ``number``."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 711
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
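# A usage sketch of the benchmark API these tests exercise. It assumes a
# working transformers + TensorFlow install; the tiny checkpoint and the
# argument names are taken from the tests themselves.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()
print(results.time_inference_result)    # per-model dict of batch/seq timings
print(results.memory_inference_result)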
| 645
| 0
|
"""simple docstring"""
from __future__ import annotations
class UpperCAmelCase_ :
def __init__( self , a , a ) -> Union[str, Any]:
        lowercase__ , lowercase__ : Any = text, pattern
        lowercase__ , lowercase__ : List[str] = len(a ), len(a )
def _UpperCAmelCase ( self , a ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def _UpperCAmelCase ( self , a ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _UpperCAmelCase ( self ) -> list[int]:
# searches pattern in text and returns index positions
lowercase__ : Union[str, Any] = []
for i in range(self.textLen - self.patLen + 1 ):
lowercase__ : Optional[int] = self.mismatch_in_text(a )
if mismatch_index == -1:
positions.append(a )
else:
lowercase__ : List[str] = self.match_in_pattern(self.text[mismatch_index] )
lowercase__ : List[Any] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_UpperCamelCase : int = "ABAABA"
_UpperCamelCase : Optional[int] = "AB"
_UpperCamelCase : List[str] = BoyerMooreSearch(text, pattern)
_UpperCamelCase : Dict = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
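# A compact, runnable restatement of the bad-character search above: slide a
# window over the text and compare the pattern right-to-left, collecting the
# positions of full matches.
def bad_character_search(text: str, pattern: str) -> list[int]:
    m = len(pattern)
    return [
        start
        for start in range(len(text) - m + 1)
        if all(pattern[i] == text[start + i] for i in range(m - 1, -1, -1))
    ]


print(bad_character_search("ABAABA", "AB"))  # [0, 3]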
| 712
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
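# A minimal inference sketch matching the integration test above; the
# checkpoint is the one the test loads, the rest assumes a standard
# torch + transformers install.
import torch
from transformers import AutoTokenizer, DistilBertModel

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = DistilBertModel.from_pretrained("distilbert-base-uncased")
model.eval()

inputs = tokenizer("Hello, world!", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state
print(hidden.shape)  # torch.Size([1, seq_len, 768])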
| 645
| 0
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def a_ ( _lowerCAmelCase : Image ):
'''simple docstring'''
    lowercase__ : List[str] = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
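# Usage sketch for the pipeline under test; the model id and the output keys
# ("depth", "predicted_depth") come from the slow test above.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
out["depth"].save("depth.png")        # PIL image, ready to inspect
print(out["predicted_depth"].shape)   # raw torch tensor of per-pixel depth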
| 713
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
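# The routine above solves the shear relation tau = F / A for whichever of
# (stress, tangential_force, area) is passed as 0; a readable restatement with
# a worked check: F = 25 N over A = 5 m^2 gives tau = 5 Pa.
def shear_stress(stress: float, tangential_force: float, area: float):
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    if stress == 0:
        return ("stress", tangential_force / area)
    if tangential_force == 0:
        return ("tangential_force", stress * area)
    return ("area", tangential_force / stress)


print(shear_stress(0, 25, 5))  # ('stress', 5.0)
print(shear_stress(5, 0, 5))   # ('tangential_force', 25)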
| 645
| 0
|
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def a_ ( _lowerCAmelCase : Iterable[str] , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Any = iter(_lowerCAmelCase )
while True:
lowercase__ : List[str] = tuple(itertools.islice(_lowerCAmelCase , _lowerCAmelCase ) )
if not chunk:
return
yield chunk
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Tuple = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
lowercase__ : Tuple = ''
if len(_lowerCAmelCase ) < 2:
return dirty
for i in range(len(_lowerCAmelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(_lowerCAmelCase ) & 1:
clean += "X"
return clean
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
lowercase__ : Optional[Any] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(_lowerCAmelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(_lowerCAmelCase )
return table
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : int = generate_table(_lowerCAmelCase )
lowercase__ : List[Any] = prepare_input(_lowerCAmelCase )
lowercase__ : Union[str, Any] = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_lowerCAmelCase , 2 ):
lowercase__ : Any = divmod(table.index(_lowerCAmelCase ) , 5 )
lowercase__ : Optional[int] = divmod(table.index(_lowerCAmelCase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Optional[int] = generate_table(_lowerCAmelCase )
lowercase__ : Optional[int] = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_lowerCAmelCase , 2 ):
lowercase__ : str = divmod(table.index(_lowerCAmelCase ) , 5 )
lowercase__ : List[str] = divmod(table.index(_lowerCAmelCase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
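# The helpers above implement a standard Playfair cipher behind the mangled
# names; this is a compact, runnable restatement of the same scheme (J folded
# into I, doubled letters split with X) with an encode/decode round trip.
import string


def make_table(key: str) -> list[str]:
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"  # 25 letters, no J
    table = []
    for ch in key.upper() + alphabet:
        if ch in alphabet and ch not in table:
            table.append(ch)
    return table


def prepare(text: str) -> str:
    # uppercase, letters only, J -> I, split doubled letters, pad to even length
    dirty = "".join(c for c in text.upper().replace("J", "I") if c in string.ascii_uppercase)
    clean = ""
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1:]
    if len(clean) % 2:
        clean += "X"
    return clean


def crypt(text: str, key: str, shift: int) -> str:
    table, out = make_table(key), ""
    for ca, cb in zip(text[::2], text[1::2]):
        row_a, col_a = divmod(table.index(ca), 5)
        row_b, col_b = divmod(table.index(cb), 5)
        if row_a == row_b:             # same row: step right (encode) or left (decode)
            out += table[row_a * 5 + (col_a + shift) % 5]
            out += table[row_b * 5 + (col_b + shift) % 5]
        elif col_a == col_b:           # same column: step down or up
            out += table[((row_a + shift) % 5) * 5 + col_a]
            out += table[((row_b + shift) % 5) * 5 + col_b]
        else:                          # rectangle: swap columns (self-inverse)
            out += table[row_a * 5 + col_b]
            out += table[row_b * 5 + col_a]
    return out


secret = crypt(prepare("hide the gold"), "playfair example", +1)
print(secret)                                  # BMODZBXDNAGE
print(crypt(secret, "playfair example", -1))   # HIDETHEGOLDX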
| 714
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
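# An end-to-end sketch of the detection path the integration test covers; the
# checkpoint, fixture image and post-processing call are the ones used above,
# and id2label is the standard transformers config mapping.
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())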
| 645
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCamelCase : Dict = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple:
lowercase__ : str = load_in_abit
lowercase__ : str = load_in_abit
lowercase__ : List[str] = llm_inta_threshold
lowercase__ : Dict = llm_inta_skip_modules
lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowercase__ : Any = llm_inta_has_fpaa_weight
lowercase__ : Any = bnb_abit_quant_type
lowercase__ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__ : Dict = torch.floataa
elif isinstance(a , a ):
lowercase__ : Any = getattr(a , a )
elif isinstance(a , torch.dtype ):
lowercase__ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def _UpperCAmelCase ( self ) -> str:
if not isinstance(self.llm_inta_threshold , a ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , a ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , a ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , a ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def _UpperCAmelCase ( self ) -> Tuple:
return self.load_in_abit or self.load_in_abit
def _UpperCAmelCase ( self ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]:
lowercase__ : List[Any] = cls(**a )
lowercase__ : Union[str, Any] = []
for key, value in kwargs.items():
if hasattr(a , a ):
setattr(a , a , a )
to_remove.append(a )
for key in to_remove:
kwargs.pop(a , a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCAmelCase ( self , a ) -> Dict:
with open(a , 'w' , encoding='utf-8' ) as writer:
lowercase__ : Any = self.to_dict()
lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n'
writer.write(a )
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def _UpperCAmelCase ( self , a = True ) -> str:
if use_diff is True:
lowercase__ : List[Any] = self.to_diff_dict()
else:
lowercase__ : List[str] = self.to_dict()
return json.dumps(a , indent=2 , sort_keys=a ) + "\n"
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Tuple = self.to_dict()
# get the default config dict
lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict()
lowercase__ : int = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__ : Optional[int] = value
return serializable_config_dict
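# A loading sketch built on the config above. The 4-bit argument names
# (load_in_4bit, bnb_4bit_*) are the public transformers spellings that the
# validation messages above refer to; the checkpoint is only a placeholder.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # placeholder checkpoint
    quantization_config=quant_config,
)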
| 645
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : torch.FloatTensor
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a=3 , a=3 , a=("DownEncoderBlock2D",) , a=(6_4,) , a=2 , a=3_2 , a="silu" , a=True , ) -> str:
super().__init__()
lowercase__ : Tuple = layers_per_block
lowercase__ : Union[str, Any] = torch.nn.Convad(
a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : int = None
lowercase__ : str = nn.ModuleList([] )
# down
lowercase__ : int = block_out_channels[0]
for i, down_block_type in enumerate(a ):
lowercase__ : List[Any] = output_channel
lowercase__ : Optional[int] = block_out_channels[i]
lowercase__ : Union[str, Any] = i == len(a ) - 1
lowercase__ : Tuple = get_down_block(
a , num_layers=self.layers_per_block , in_channels=a , out_channels=a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , )
self.down_blocks.append(a )
# mid
lowercase__ : int = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , )
# out
lowercase__ : Dict = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=a , eps=1e-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Dict = 2 * out_channels if double_z else out_channels
lowercase__ : List[Any] = nn.Convad(block_out_channels[-1] , a , 3 , padding=1 )
lowercase__ : Dict = False
def _UpperCAmelCase ( self , a ) -> Optional[int]:
lowercase__ : Dict = x
lowercase__ : Optional[Any] = self.conv_in(a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(a ):
def custom_forward(*a ):
return module(*a )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
lowercase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(a ) , a , use_reentrant=a )
# middle
lowercase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , use_reentrant=a )
else:
for down_block in self.down_blocks:
lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a )
# middle
lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , a )
else:
# down
for down_block in self.down_blocks:
lowercase__ : int = down_block(a )
# middle
lowercase__ : Optional[int] = self.mid_block(a )
# post-process
lowercase__ : List[Any] = self.conv_norm_out(a )
lowercase__ : Dict = self.conv_act(a )
lowercase__ : Optional[Any] = self.conv_out(a )
return sample
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a=3 , a=3 , a=("UpDecoderBlock2D",) , a=(6_4,) , a=2 , a=3_2 , a="silu" , a="group" , ) -> Dict:
super().__init__()
lowercase__ : List[Any] = layers_per_block
lowercase__ : Optional[int] = nn.Convad(
a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Tuple = None
lowercase__ : Dict = nn.ModuleList([] )
lowercase__ : int = in_channels if norm_type == 'spatial' else None
# mid
lowercase__ : List[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , )
# up
lowercase__ : Optional[Any] = list(reversed(a ) )
lowercase__ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(a ):
lowercase__ : str = output_channel
lowercase__ : str = reversed_block_out_channels[i]
lowercase__ : Tuple = i == len(a ) - 1
lowercase__ : List[Any] = get_up_block(
a , num_layers=self.layers_per_block + 1 , in_channels=a , out_channels=a , prev_output_channel=a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , resnet_time_scale_shift=a , )
self.up_blocks.append(a )
lowercase__ : Optional[Any] = output_channel
# out
if norm_type == "spatial":
lowercase__ : Optional[Any] = SpatialNorm(block_out_channels[0] , a )
else:
lowercase__ : Union[str, Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=a , eps=1e-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Any = nn.Convad(block_out_channels[0] , a , 3 , padding=1 )
lowercase__ : str = False
def _UpperCAmelCase ( self , a , a=None ) -> Dict:
lowercase__ : List[str] = z
lowercase__ : List[Any] = self.conv_in(a )
lowercase__ : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(a ):
def custom_forward(*a ):
return module(*a )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , a , use_reentrant=a )
lowercase__ : List[Any] = sample.to(a )
# up
for up_block in self.up_blocks:
lowercase__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(a ) , a , a , use_reentrant=a )
else:
# middle
lowercase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , a )
lowercase__ : Optional[Any] = sample.to(a )
# up
for up_block in self.up_blocks:
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a , a )
else:
# middle
lowercase__ : Any = self.mid_block(a , a )
lowercase__ : List[Any] = sample.to(a )
# up
for up_block in self.up_blocks:
lowercase__ : Any = up_block(a , a )
# post-process
if latent_embeds is None:
lowercase__ : int = self.conv_norm_out(a )
else:
lowercase__ : Union[str, Any] = self.conv_norm_out(a , a )
lowercase__ : Union[str, Any] = self.conv_act(a )
lowercase__ : Optional[int] = self.conv_out(a )
return sample
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a , a=None , a="random" , a=False , a=True ) -> Optional[Any]:
super().__init__()
lowercase__ : int = n_e
lowercase__ : str = vq_embed_dim
lowercase__ : Tuple = beta
lowercase__ : List[Any] = legacy
lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase__ : Optional[int] = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
lowercase__ : Optional[int] = self.used.shape[0]
lowercase__ : List[Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase__ : Any = self.re_embed
lowercase__ : List[Any] = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
lowercase__ : str = n_e
lowercase__ : str = sane_index_shape
def _UpperCAmelCase ( self , a ) -> List[Any]:
lowercase__ : Dict = inds.shape
assert len(a ) > 1
lowercase__ : List[Any] = inds.reshape(ishape[0] , -1 )
lowercase__ : Tuple = self.used.to(a )
lowercase__ : str = (inds[:, :, None] == used[None, None, ...]).long()
lowercase__ : Dict = match.argmax(-1 )
lowercase__ : int = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase__ : List[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase__ : Optional[Any] = self.unknown_index
return new.reshape(a )
def _UpperCAmelCase ( self , a ) -> Tuple:
lowercase__ : List[str] = inds.shape
assert len(a ) > 1
lowercase__ : int = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(a )
if self.re_embed > self.used.shape[0]: # extra token
lowercase__ : Dict = 0 # simply set to zero
lowercase__ : Any = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , a )
return back.reshape(a )
def _UpperCAmelCase ( self , a ) -> List[str]:
# reshape z -> (batch, height, width, channel) and flatten
lowercase__ : str = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase__ : List[str] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowercase__ : str = torch.argmin(torch.cdist(a , self.embedding.weight ) , dim=1 )
lowercase__ : str = self.embedding(a ).view(z.shape )
lowercase__ : Union[str, Any] = None
lowercase__ : Tuple = None
# compute loss for embedding
if not self.legacy:
lowercase__ : int = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase__ : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowercase__ : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
lowercase__ : List[str] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase__ : Optional[int] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase__ : Optional[int] = self.remap_to_used(a )
lowercase__ : Union[str, Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase__ : Any = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _UpperCAmelCase ( self , a , a ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowercase__ : Optional[int] = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase__ : Any = self.unmap_to_all(a )
lowercase__ : Dict = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase__ : Optional[Any] = self.embedding(a )
if shape is not None:
lowercase__ : Dict = z_q.view(a )
# reshape back to match original input shape
lowercase__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=False ) -> Any:
lowercase__ : Optional[Any] = parameters
lowercase__ : List[str] = torch.chunk(a , 2 , dim=1 )
lowercase__ : Any = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ : Dict = deterministic
lowercase__ : List[str] = torch.exp(0.5 * self.logvar )
lowercase__ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
lowercase__ : int = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _UpperCAmelCase ( self , a = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
lowercase__ : List[str] = randn_tensor(
self.mean.shape , generator=a , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase__ : Dict = self.mean + self.std * sample
return x
def _UpperCAmelCase ( self , a=None ) -> Union[str, Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _UpperCAmelCase ( self , a , a=[1, 2, 3] ) -> str:
if self.deterministic:
return torch.Tensor([0.0] )
lowercase__ : Optional[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=a )
def _UpperCAmelCase ( self ) -> int:
return self.mean
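# The Gaussian posterior class above implements the reparameterization trick
# (x = mean + std * eps) and the closed-form KL to a standard normal; a
# self-contained numeric restatement of both:
import torch

mean = torch.zeros(1, 4)
logvar = torch.full((1, 4), -1.0)
std = torch.exp(0.5 * logvar)

eps = torch.randn_like(std)
sample = mean + std * eps  # differentiable sample via reparameterization

var = torch.exp(logvar)
kl = 0.5 * torch.sum(mean.pow(2) + var - 1.0 - logvar, dim=1)
print(sample, kl)  # kl is 0 only when mean = 0 and logvar = 0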
| 716
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
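# A CPU-safe restatement of the peak-memory bookkeeping above: the same
# bytes -> MB conversion and begin/used/peaked arithmetic, using the stdlib
# tracemalloc so it runs without a GPU.
import tracemalloc


def to_mb(n_bytes: int) -> int:
    return int(n_bytes / 2**20)


tracemalloc.start()
begin, _ = tracemalloc.get_traced_memory()
buf = bytearray(50 * 2**20)  # allocate roughly 50 MB
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()

print("used:", to_mb(current - begin), "MB")
print("peaked:", to_mb(peak - begin), "MB")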
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def training_function(config , args ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
            json.dump(train_total_peak_memory , f )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--peak_memory_upper_bound' , type=float , default=None , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
    parser.add_argument(
        '--n_train' , type=int , default=320 , help='Number of training examples to use.' , )
    parser.add_argument(
        '--n_val' , type=int , default=160 , help='Number of validation examples to use.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=1 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
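# Hedged launch sketch (the DeepSpeed config file name and script name are
# hypothetical; the flags match the argparse definitions above):
#
#     accelerate launch --config_file deepspeed_zero2.yaml this_script.py \
#         --model_name_or_path bert-base-cased --num_epochs 1
#
# When the DeepSpeed config supplies its own optimizer/scheduler entries, the
# DummyOptim/DummyScheduler placeholders above are what accelerator.prepare() expects.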
| 645
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
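# Minimal standalone sketch of the integration check above (downloads the real
# distilbert-base-uncased weights on first run; the token ids below are illustrative):
#
#     import torch
#     from transformers import DistilBertModel
#
#     model = DistilBertModel.from_pretrained('distilbert-base-uncased' )
#     input_ids = torch.tensor([[101, 7592, 2088, 102]] )
#     with torch.no_grad():
#         hidden = model(input_ids )[0]
#     print(hidden.shape )  # torch.Size([1, 4, 768])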
| 717
|
"""simple docstring"""
def prefix_function(input_string: str ):
    '''simple docstring'''
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_string: str ):
    '''simple docstring'''
    return max(prefix_function(input_string ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
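# Worked example for the two functions above: "aabcdaabc" has prefix (failure)
# function [0, 1, 0, 0, 0, 1, 2, 3, 4], so the longest border has length 4 ("aabc").
assert prefix_function('aabcdaabc' ) == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix('aabcdaabc' ) == 4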
| 645
| 0
|
"""simple docstring"""
import os
from pathlib import Path
def write_model_card(model_card_dir , src_lang , tgt_lang , model_name ):
    '''simple docstring'''
    texts = {
        'en': 'Machine learning is great, isn\'t it?',
        'ru': 'Машинное обучение - это здорово, не так ли?',
        'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'wmt16-en-de-dist-12-1': [28.3, 27.52],
        'wmt16-en-de-dist-6-1': [27.4, 27.11],
        'wmt16-en-de-12-1': [26.9, 25.75],
    }
    pair = f"""{src_lang}-{tgt_lang}"""
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(f"""Generating {path}""" )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 718
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
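# Hedged preprocessing sketch matching the shape assertions above (constructor
# kwargs mirror prepare_image_processor_dict; values are illustrative):
#
#     import numpy as np
#     from PIL import Image
#     from transformers import MobileViTImageProcessor
#
#     processor = MobileViTImageProcessor(size={'shortest_edge': 20} , crop_size={'height': 18, 'width': 18} )
#     image = Image.fromarray(np.uint8(np.random.rand(30 , 40 , 3 ) * 255 ) )
#     pixel_values = processor(image , return_tensors='pt' ).pixel_values  # (1, 3, 18, 18)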
| 645
| 0
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
    '''simple docstring'''
    print('Making key files...' )
    make_key_files('rsa' , 1024 )
    print('Key files generation successful.' )
def generate_key(key_size: int ):
    '''simple docstring'''
    print('Generating prime p...' )
    p = rabinMiller.generate_large_prime(key_size )
    print('Generating prime q...' )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q
    print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
    while True:
        e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
            break
    print('Calculating d that is mod inverse of e...' )
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str , key_size: int ):
    '''simple docstring'''
    if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
        print('\nWARNING:' )
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(f"""{name}_pubkey.txt""" , 'w' ) as out_file:
        out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" )
    print(f"""Writing private key to file {name}_privkey.txt...""" )
    with open(f"""{name}_privkey.txt""" , 'w' ) as out_file:
        out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
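# Tiny worked example of the RSA arithmetic above with textbook toy primes
# (insecure sizes, for illustration only): e must be coprime to (p - 1) * (q - 1),
# and d is its modular inverse.
p, q = 61, 53
n = p * q                # 3233
phi = (p - 1) * (q - 1)  # 3120
e = 17                   # gcd(17, 3120) == 1
d = pow(e , -1 , phi )   # 2753
assert pow(pow(65 , e , n ) , d , n ) == 65  # encrypt then decrypt round-trips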
| 719
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 0
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__( self , vqvae , unet , scheduler ) -> Union[str, Any]:
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 50 , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents , t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
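# Hedged usage sketch: the class above mirrors diffusers' unconditional LDMPipeline,
# so a compatible checkpoint such as CompVis/ldm-celebahq-256 can drive it end to end:
#
#     import torch
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
#     image = pipe(batch_size=1 , num_inference_steps=50 , generator=torch.manual_seed(0 ) ).images[0]
#     image.save('ldm_sample.png' )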
| 720
|
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float] , x: float ):
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner(poly: Sequence[float] , x: float ):
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
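# Sanity check for the example above: with poly = (0.0, 0.0, 5.0, 9.3, 7.0) and
# x = 10.0, both evaluators return 5.0*10**2 + 9.3*10**3 + 7.0*10**4
# = 500 + 9300 + 70000 = 79800.0; Horner's rule reaches that with one multiply-add
# per coefficient instead of recomputing powers of x.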
| 645
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Any = ["PerceiverFeatureExtractor"]
_UpperCamelCase : str = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
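# Note on the pattern above: outside TYPE_CHECKING the heavy torch/vision imports are
# never executed eagerly; sys.modules[__name__] is swapped for a _LazyModule that
# resolves each name from _import_structure on first attribute access, e.g.:
#
#     import transformers.models.perceiver as perceiver
#     perceiver.PerceiverConfig  # only now is configuration_perceiver imported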
| 721
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_UpperCamelCase : Any = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def a_ ( _lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ) -> List[Any]:
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            hf_gcp_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , '/' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            downloaded_file = cached_path(hf_gcp_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(downloaded_file ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory ):
    '''simple docstring'''
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir , config_name='20220301.frr' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path ):
    '''simple docstring'''
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path , config_name='20220301.frr' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['train'] , IterableDataset )
    assert next(iter(ds['train'] ) )
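# Hedged streaming sketch matching the last test (assumes a datasets version where
# the `wikipedia` script and its prepared 20220301.frr data are still resolvable):
#
#     from datasets import load_dataset
#     ds = load_dataset('wikipedia' , '20220301.frr' , split='train' , streaming=True )
#     print(next(iter(ds ) )['title'] )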
| 645
| 0
|
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a ) -> List[Any]:
lowercase__ : List[Any] = parent
def _UpperCAmelCase ( self ) -> str:
return {}
def a_ ( ):
'''simple docstring'''
lowercase__ : List[str] = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
lowercase__ : Optional[Any] = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Dict = MarkupLMFeatureExtractor if is_bsa_available() else None
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Union[str, Any] = MarkupLMFeatureExtractionTester(self )
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return self.feature_extract_tester.prepare_feat_extract_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
# Initialize feature_extractor
lowercase__ : List[Any] = self.feature_extraction_class()
# Test not batched input
lowercase__ : Tuple = get_html_strings()[0]
lowercase__ : Any = feature_extractor(a )
# fmt: off
lowercase__ : Dict = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
lowercase__ : str = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , a )
self.assertEqual(encoding.xpaths , a )
# Test batched
lowercase__ : List[Any] = get_html_strings()
lowercase__ : List[Any] = feature_extractor(a )
# fmt: off
lowercase__ : Union[str, Any] = expected_nodes + [['My First Heading', 'My first paragraph.']]
lowercase__ : Dict = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , a )
self.assertEqual(encoding.xpaths , a )
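# Minimal sketch of what the feature extractor returns (requires beautifulsoup4);
# the expected values below follow by analogy with the assertions above:
#
#     from transformers import MarkupLMFeatureExtractor
#
#     fe = MarkupLMFeatureExtractor()
#     enc = fe('<html><body><h1>Hello</h1><p>world</p></body></html>' )
#     enc.nodes   # [['Hello', 'world']]
#     enc.xpaths  # [['/html/body/h1', '/html/body/p']]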
| 700
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict ):
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost(features: np.ndarray , target: np.ndarray , test_features: np.ndarray ):
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main():
    '''simple docstring'''
    california = fetch_california_housing()
    data , target = data_handling(california )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test , predictions )}""" )
    print(f"""Mean Square Error : {mean_squared_error(y_test , predictions )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
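# Hedged variation: the same regressor with a few common XGBoost knobs exposed
# (values are illustrative, not tuned for this dataset).
tuned = XGBRegressor(
    n_estimators=200 , learning_rate=0.1 , max_depth=6 , random_state=42 , verbosity=0 )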
| 645
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 701
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
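# Standalone sketch of the bool_masked_pos layout built in
# create_and_check_for_pretraining above: one 1/0 vector with `num_masks` ones,
# broadcast over the batch as booleans (numbers below are illustrative).
_seq_length, _num_masks, _batch = 10, 9, 2  # mask_ratio = 0.9
_mask = torch.cat([torch.ones(_num_masks ), torch.zeros(_seq_length - _num_masks )] )
_bool_masked_pos = _mask.expand(_batch , -1 ).bool()
assert _bool_masked_pos.shape == (2, 10)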
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]:
lowercase__ : Union[str, Any] = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase__ : Union[str, Any] = bool_masked_pos.to(a )
if return_labels:
if model_class in [
*get_values(a ),
]:
lowercase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def a_ ( ):
'''simple docstring'''
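# Fetch a short sample clip (stored on the Hub as an .npy array of frames) and
# return it as a list of per-frame arrays.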
lowercase__ : int = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
lowercase__ : str = np.load(_lowerCAmelCase )
return list(_lowerCAmelCase )
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
lowercase__ : List[Any] = torch.tensor([0.6_469] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
| 645
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def a_ ( _lowerCAmelCase : List[Any] ):
'''simple docstring'''
lowercase__ : List[str] = SwinConfig()
lowercase__ : Optional[Any] = swin_name.split('_' )
lowercase__ : int = name_split[1]
lowercase__ : Tuple = int(name_split[4] )
lowercase__ : Optional[int] = int(name_split[3][-1] )
if model_size == "tiny":
lowercase__ : Tuple = 96
lowercase__ : List[Any] = (2, 2, 6, 2)
lowercase__ : Tuple = (3, 6, 12, 24)
elif model_size == "small":
lowercase__ : Optional[Any] = 96
lowercase__ : List[Any] = (2, 2, 18, 2)
lowercase__ : Union[str, Any] = (3, 6, 12, 24)
elif model_size == "base":
lowercase__ : int = 128
lowercase__ : Any = (2, 2, 18, 2)
lowercase__ : Any = (4, 8, 16, 32)
else:
lowercase__ : List[str] = 192
lowercase__ : str = (2, 2, 18, 2)
lowercase__ : Any = (6, 12, 24, 48)
if "in22k" in swin_name:
lowercase__ : Optional[Any] = 2_1841
else:
lowercase__ : str = 1000
lowercase__ : int = 'huggingface/label-files'
lowercase__ : Optional[Any] = 'imagenet-1k-id2label.json'
lowercase__ : List[Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
lowercase__ : str = {int(k ): v for k, v in idalabel.items()}
lowercase__ : str = idalabel
lowercase__ : str = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = img_size
lowercase__ : Tuple = num_classes
lowercase__ : str = embed_dim
lowercase__ : List[Any] = depths
lowercase__ : Dict = num_heads
lowercase__ : Any = window_size
return config
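# Example (for orientation): the timm name "swin_tiny_patch4_window7_224" parses to
# model_size="tiny", img_size=224 and window_size=7, which selects embed_dim=96,
# depths=(2, 2, 6, 2) and num_heads=(3, 6, 12, 24) above.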
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
if "patch_embed.proj" in name:
lowercase__ : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowercase__ : Optional[int] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
lowercase__ : Any = 'encoder.' + name
if "attn.proj" in name:
lowercase__ : str = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowercase__ : int = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowercase__ : List[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowercase__ : Tuple = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowercase__ : Optional[Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowercase__ : List[str] = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
lowercase__ : Tuple = 'layernorm.weight'
if name == "norm.bias":
lowercase__ : Optional[int] = 'layernorm.bias'
if "head" in name:
lowercase__ : Union[str, Any] = name.replace('head' , 'classifier' )
else:
lowercase__ : Dict = 'swin.' + name
return name
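# For instance, the timm key "layers.0.blocks.0.attn.proj.weight" becomes
# "swin.encoder.layers.0.blocks.0.attention.output.dense.weight" under the rules above.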
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase__ : Union[str, Any] = orig_state_dict.pop(_lowerCAmelCase )
if "mask" in key:
continue
elif "qkv" in key:
lowercase__ : List[str] = key.split('.' )
lowercase__ : List[str] = int(key_split[1] )
lowercase__ : List[Any] = int(key_split[3] )
lowercase__ : List[str] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
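# timm fuses query/key/value into a single qkv projection; slice it into equal
# thirds so q, k and v each get their own weight (and bias) in the HF layout.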
if "weight" in key:
lowercase__ : str = val[:dim, :]
lowercase__ : Optional[int] = val[dim : dim * 2, :]
lowercase__ : Tuple = val[-dim:, :]
else:
lowercase__ : List[str] = val[:dim]
lowercase__ : Tuple = val[dim : dim * 2]
lowercase__ : Union[str, Any] = val[-dim:]
else:
lowercase__ : Union[str, Any] = val
return orig_state_dict
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
lowercase__ : Optional[Any] = get_swin_config(_lowerCAmelCase )
lowercase__ : Any = SwinForImageClassification(_lowerCAmelCase )
model.eval()
lowercase__ : Dict = convert_state_dict(timm_model.state_dict() , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
lowercase__ : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ : int = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
lowercase__ : List[str] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
lowercase__ : List[str] = image_processor(images=_lowerCAmelCase , return_tensors='pt' )
lowercase__ : Tuple = timm_model(inputs['pixel_values'] )
lowercase__ : Optional[Any] = model(**_lowerCAmelCase ).logits
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
print(f"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_UpperCamelCase : Any = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 702
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
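# MAPPING translates fairseq parameter prefixes to their HF UniSpeechSat
# counterparts; the "*" placeholder is a per-layer wildcard that is filled in
# with the layer index during conversion.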
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
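# type_id 0 -> parameters of the convolution itself; type_id 2 -> parameters of
# its normalization layer (with group norm, only the first conv layer has one).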
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 645
| 0
|
"""simple docstring"""
_UpperCamelCase : int = range(2, 20 + 1)
_UpperCamelCase : Union[str, Any] = [10**k for k in range(ks[-1] + 1)]
_UpperCamelCase : dict[int, dict[int, list[list[int]]]] = {}
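# This solves a digit-sum recurrence (a_n = a_{n-1} + digitsum(a_{n-1}), in the
# spirit of Project Euler problem 551 -- an assumption based on the code below):
# `memo` caches, per digit-state, "jumps" of the form (diff, terms_skipped, k) so
# long stretches of the sequence can be skipped instead of recomputed term by term.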
def a_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
lowercase__ : int = sum(a_i[j] for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ) )
lowercase__ : str = sum(a_i[j] * base[j] for j in range(min(len(_lowerCAmelCase ) , _lowerCAmelCase ) ) )
lowercase__ , lowercase__ : Optional[Any] = 0, 0
lowercase__ : List[Any] = n - i
lowercase__ : str = memo.get(_lowerCAmelCase )
if sub_memo is not None:
lowercase__ : List[Any] = sub_memo.get(_lowerCAmelCase )
if jumps is not None and len(_lowerCAmelCase ) > 0:
# find and make the largest jump without going over
lowercase__ : List[Any] = -1
for _k in range(len(_lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowercase__ : Optional[Any] = _k
break
if max_jump >= 0:
lowercase__ , lowercase__ , lowercase__ : str = jumps[max_jump]
# since the difference between jumps is cached, add c
lowercase__ : List[Any] = diff + c
for j in range(min(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
lowercase__ , lowercase__ : List[str] = divmod(_lowerCAmelCase , 10 )
if new_c > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
lowercase__ : Optional[Any] = []
else:
lowercase__ : Optional[int] = {c: []}
lowercase__ : str = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowercase__ , lowercase__ : Any = next_term(_lowerCAmelCase , k - 1 , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowercase__ , lowercase__ : List[Any] = compute(_lowerCAmelCase , _lowerCAmelCase , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
lowercase__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowercase__ : int = 0
while j < len(_lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value of digitsum(b) and c
sub_memo[c].insert(_lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def a_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(_lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowercase__ : Optional[Any] = i
lowercase__ , lowercase__ , lowercase__ : Any = 0, 0, 0
for j in range(len(_lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowercase__ : str = ds_c + ds_b
diff += addend
lowercase__ : str = 0
for j in range(_lowerCAmelCase ):
lowercase__ : List[Any] = a_i[j] + addend
lowercase__ , lowercase__ : Optional[int] = divmod(_lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return diff, i - start_i
def a_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
lowercase__ : Union[str, Any] = digits[j] + addend
if s >= 10:
lowercase__ , lowercase__ : Union[str, Any] = divmod(_lowerCAmelCase , 10 )
lowercase__ : List[Any] = addend // 10 + quotient
else:
lowercase__ : Optional[Any] = s
lowercase__ : Tuple = addend // 10
if addend == 0:
break
while addend > 0:
lowercase__ , lowercase__ : Dict = divmod(_lowerCAmelCase , 10 )
digits.append(_lowerCAmelCase )
def a_ ( _lowerCAmelCase = 10**15 ):
'''simple docstring'''
lowercase__ : List[Any] = [1]
lowercase__ : str = 1
lowercase__ : Optional[int] = 0
while True:
lowercase__ , lowercase__ : str = next_term(_lowerCAmelCase , 20 , i + dn , _lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
lowercase__ : str = 0
for j in range(len(_lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 703
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t ):
t[t != t] = 0
return t
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
| 645
| 0
|
"""simple docstring"""
from functools import reduce
_UpperCamelCase : Tuple = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def a_ ( _lowerCAmelCase : str = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _lowerCAmelCase , _lowerCAmelCase : str(int(_lowerCAmelCase ) * int(_lowerCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(_lowerCAmelCase ) - 12 ) )
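# A plain sliding-window version of the same search (an illustrative sketch of my
# own, not part of the original solution): scan every run of `span` consecutive
# digits of the 1000-digit constant N above, multiply them, and keep the maximum.
def solution_window(digits: str = N, span: int = 13) -> int:
    best = 0
    for i in range(len(digits) - span + 1):
        product = 1
        for ch in digits[i : i + span]:
            product *= int(ch)
        best = max(best, product)
    return best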
if __name__ == "__main__":
print(f'''{solution() = }''')
| 704
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
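# Equivalent closed form (an illustrative sketch, not part of the original):
# sum(1..n) = n*(n+1)/2 and sum(1^2..n^2) = n*(n+1)*(2n+1)/6, so the same
# difference can be computed in O(1) instead of O(n).
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares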
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
| 0
|
"""simple docstring"""
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_UpperCamelCase : Optional[Any] = TypeVar("T")
class UpperCAmelCase_ ( Generic[T]):
def __init__( self , a = True ) -> None:
lowercase__ : dict[T, list[T]] = {} # dictionary of lists
lowercase__ : str = directed
def _UpperCAmelCase ( self , a , a ) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(a )
self.adj_list[destination_vertex].append(a )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(a )
lowercase__ : int = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the destination vertex
# as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(a )
lowercase__ : Any = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as its first adjacent vertex; also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as its first adjacent vertex.
else:
lowercase__ : Optional[Any] = [destination_vertex]
lowercase__ : List[str] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(a )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(a )
lowercase__ : Union[str, Any] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
lowercase__ : Dict = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
lowercase__ : Tuple = [destination_vertex]
lowercase__ : int = []
return self
def __repr__( self ) -> str:
return pformat(self.adj_list )
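# Minimal usage sketch (illustrative only; note the class is declared as
# `UpperCAmelCase_` above and its edge-adding method as `_UpperCAmelCase`):
#
#     graph = UpperCAmelCase_[int](False)  # undirected
#     graph._UpperCAmelCase(1, 2)._UpperCAmelCase(2, 3)
#     print(graph)  # -> {1: [2], 2: [1, 3], 3: [2]}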
| 705
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
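# Replicate the params across all devices and shard the batched inputs along the
# device axis so the jit-compiled pipeline runs data-parallel via pmap.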
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 645
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase : Any = logging.get_logger(__name__)
_UpperCamelCase : Union[str, Any] = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : int = "roberta"
def __init__( self , a=5_0_2_6_5 , a=7_6_8 , a=1_2 , a=1_2 , a=3_0_7_2 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=2 , a=0.02 , a=1e-12 , a=1 , a=0 , a=2 , a="absolute" , a=True , a=None , **a , ) -> Optional[Any]:
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
lowercase__ : str = vocab_size
lowercase__ : str = hidden_size
lowercase__ : str = num_hidden_layers
lowercase__ : Any = num_attention_heads
lowercase__ : Dict = hidden_act
lowercase__ : List[str] = intermediate_size
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Optional[Any] = max_position_embeddings
lowercase__ : Tuple = type_vocab_size
lowercase__ : List[Any] = initializer_range
lowercase__ : Any = layer_norm_eps
lowercase__ : Optional[int] = position_embedding_type
lowercase__ : Union[str, Any] = use_cache
lowercase__ : List[Any] = classifier_dropout
class UpperCAmelCase_ ( _a):
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowercase__ : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowercase__ : Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
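# For any task other than multiple-choice this property resolves to, e.g.:
# OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#              ("attention_mask", {0: "batch", 1: "sequence"})])
# i.e. the batch and sequence dimensions are marked dynamic for ONNX export.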
| 706
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645
| 0
|
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_UpperCamelCase : Tuple = logging.get_logger(__name__)
_UpperCamelCase : Any = "T5Config"
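# The three classes below are thin wrappers: each TF mT5 variant reuses the
# corresponding T5 implementation unchanged and only swaps in MTaConfig (the
# mT5 configuration) together with the "mt5" model type.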
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Optional[Any] = "mt5"
lowerCamelCase__ : Tuple = MTaConfig
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : List[str] = "mt5"
lowerCamelCase__ : str = MTaConfig
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : int = "mt5"
lowerCamelCase__ : List[Any] = MTaConfig
| 707
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
| 645
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : List[Any] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def _UpperCAmelCase ( self , a=0 ) -> Optional[Any]:
lowercase__ : Optional[int] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(a ) )
lowercase__ : List[Any] = np.random.RandomState(a )
lowercase__ : int = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=a )
lowercase__ : int = self.get_dummy_inputs()
lowercase__ : Optional[Any] = pipe(**a ).images
lowercase__ : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : Union[str, Any] = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : Optional[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a )
pipe.set_progress_bar_config(disable=a )
lowercase__ : List[Any] = self.get_dummy_inputs()
lowercase__ : str = pipe(**a ).images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : Union[str, Any] = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
# warmup pass to apply optimizations
lowercase__ : str = pipe(**self.get_dummy_inputs() )
lowercase__ : Optional[int] = self.get_dummy_inputs()
lowercase__ : int = pipe(**a ).images
lowercase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : str = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase__ : List[str] = self.get_dummy_inputs()
lowercase__ : Tuple = pipe(**a ).images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : List[Any] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : Optional[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase__ : str = self.get_dummy_inputs()
lowercase__ : int = pipe(**a ).images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : Optional[int] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase__ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a )
lowercase__ : Optional[Any] = self.get_dummy_inputs()
lowercase__ : Tuple = pipe(**a ).images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
lowercase__ : List[Any] = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def _UpperCAmelCase ( self ) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Tuple = ort.SessionOptions()
lowercase__ : List[Any] = False
return options
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase__ : Optional[int] = init_image.resize((7_6_8, 5_1_2) )
# using the PNDM scheduler by default
lowercase__ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
lowercase__ : int = 'A fantasy landscape, trending on artstation'
lowercase__ : int = np.random.RandomState(0 )
lowercase__ : str = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=a , output_type='np' , )
lowercase__ : List[str] = output.images
lowercase__ : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
lowercase__ : Optional[Any] = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase__ : Optional[int] = init_image.resize((7_6_8, 5_1_2) )
lowercase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
lowercase__ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
lowercase__ : str = 'A fantasy landscape, trending on artstation'
lowercase__ : Any = np.random.RandomState(0 )
lowercase__ : Optional[int] = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=a , output_type='np' , )
lowercase__ : str = output.images
lowercase__ : Tuple = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
lowercase__ : List[str] = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
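# A minimal, self-contained sketch of the slice-comparison check the tests above
# repeat inline; the helper name is illustrative, and the loose tolerance mirrors
# the ones used above to absorb ONNX Runtime's run-to-run nondeterminism.
import numpy as np
def assert_slice_close( image_slice , expected_slice , atol=1e-1 ):
    # compare a flattened corner crop of the generated image against a reference
    assert np.abs(image_slice.flatten() - expected_slice.flatten() ).max() < atol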
| 708
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def hashimage( image: Image ):
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
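# A minimal usage sketch of the depth-estimation pipeline exercised above, reusing
# the `pipeline` import at the top of this file; it assumes network access to the
# Hub, and the output keys mirror the assertions in the tests.
def run_depth_estimation_example():
    depth_estimator = pipeline('depth-estimation' , model='Intel/dpt-large' )
    outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
    print(outputs['predicted_depth'].shape )  # per-pixel depth as a torch.Tensor
    outputs['depth'].save('depth.png' )  # visualization as a PIL image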
| 645
| 0
|
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers( max_number: int ) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution( base: int = 80_0800 , degree: int = 80_0800 ) -> int:
    '''simple docstring'''
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
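# A brute-force cross-check for small bounds, useful for validating the two-pointer
# count in `solution`; the helper name and any tiny limits you pass are illustrative.
def count_hybrid_integers_bruteforce( base: int , degree: int ) -> int:
    upper_bound = degree * log2(base )
    primes = calculate_prime_numbers(int(upper_bound ) )
    count = 0
    for i, p in enumerate(primes ):
        for q in primes[i + 1 :]:
            # p**q * q**p <= base**degree, compared in log space to avoid huge ints
            if q * log2(p ) + p * log2(q ) <= upper_bound:
                count += 1
    return count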
if __name__ == "__main__":
print(f'''{solution() = }''')
| 709
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]:
self.assertEqual(len(a ) , len(a ) )
# Order doesn't matter
lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
for cba, cba in zip(a , a ):
if isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(a , a )
elif isinstance(a , a ) and not isinstance(a , a ):
self.assertEqual(a , cba.__class__ )
elif not isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(cba.__class__ , a )
else:
self.assertEqual(a , a )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
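# A minimal sketch of a bespoke callback in the style tested above: subclass
# TrainerCallback, override the hooks you need, and pass it through `callbacks=[...]`
# when building the Trainer. The loss threshold below is illustrative.
class LossThresholdCallback(TrainerCallback ):
    def __init__( self , threshold: float = 0.01 ) -> None:
        self.threshold = threshold
    def on_log( self , args , state , control , logs=None , **kwargs ):
        # ask the Trainer to stop once the reported training loss is low enough
        if logs is not None and logs.get('loss' , float('inf' ) ) < self.threshold:
            control.should_training_stop = True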
| 645
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
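# A short demonstration of what the lazy-module pattern above buys: importing the
# package is cheap, and torch-backed symbols are resolved only on first attribute
# access. Assumes `transformers` (and torch for model classes) are installed; the
# helper name is illustrative.
def demo_lazy_dpt_import():
    import transformers.models.dpt as dpt  # fast: only the import structure is read
    config = dpt.DPTConfig()  # touching the attribute triggers the real import
    print(config.model_type )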
| 710
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
| 0
|
"""simple docstring"""
class RadixNode:
    def __init__( self , prefix: str = "" , is_leaf: bool = False ) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self , word: str ) -> tuple[str, str, str]:
        # Returns (common substring, remaining prefix, remaining word)
        x = 0
        for q, w in zip(self.prefix , word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self , words: list[str] ) -> None:
        for word in words:
            self.insert(word )
    def insert( self , word: str ) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self , word: str ) -> bool:
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self , word: str ) -> bool:
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self , height: int = 0 ) -> None:
        if self.prefix != "":
            print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
def test_trie() -> bool:
    words = 'banana bananas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find('bandanas' )
    assert not root.find('apps' )
    root.delete('all' )
    assert not root.find('all' )
    root.delete('banana' )
    assert not root.find('banana' )
    assert root.find('bananas' )
    return True
def pytests() -> None:
    assert test_trie()
def main() -> None:
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words )
    print('Words:' , words )
    print('Tree:' )
    root.print_tree()
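# A worked example of the prefix-splitting cases in `insert`: 'tester' hangs an
# 'er' edge under 'test' (Case 3), and 'team' then splits 'test' into
# 'te' -> {'st', 'am'} (Case 4). The helper name is illustrative.
def radix_split_demo() -> None:
    root = RadixNode()
    root.insert_many(['test' , 'tester' , 'team'] )
    root.print_tree()
    assert root.find('team' ) and root.find('tester' ) and not root.find('te' )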
if __name__ == "__main__":
main()
| 711
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
| 645
| 0
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
    if _lowerCAmelCase < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(_lowerCAmelCase , float ):
raise TypeError('Input value must be a \'int\' type' )
return bin(_lowerCAmelCase ).count('1' )
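# Quick sanity checks for the set-bit counter above; the values follow from the
# binary expansions 25 = 0b11001 and 36 = 0b100100. The helper name is illustrative.
def _demo_count_set_bits() -> None:
    assert a_(25 ) == 3  # 0b11001 -> three set bits
    assert a_(36 ) == 2  # 0b100100 -> two set bits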
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 0
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class UpperCAmelCase_ ( _a):
def __init__( self , a , a = None , a = None , a = True , a = None , a = False , a = None , a = True , a = "arrow" , **a , ) -> Optional[Any]:
super().__init__(
split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , **a , )
lowercase__ : Tuple = load_from_cache_file
lowercase__ : Optional[Any] = file_format
lowercase__ : List[Any] = Spark(
df=a , features=a , cache_dir=a , working_dir=a , **a , )
def _UpperCAmelCase ( self ) -> Any:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase__ : List[Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=a , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
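# The public entry point for the reader above in the upstream `datasets` library is
# Dataset.from_spark; a minimal local sketch (the column name is illustrative and a
# running pyspark session is assumed).
def spark_to_dataset_example():
    from pyspark.sql import SparkSession
    from datasets import Dataset
    spark = SparkSession.builder.master('local[*]' ).getOrCreate()
    df = spark.createDataFrame([('hello' ,), ('world' ,)] , ['text'] )
    ds = Dataset.from_spark(df )  # routes through the Spark builder wrapped above
    print(ds[0] )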
| 713
|
"""simple docstring"""
from __future__ import annotations
def a_ ( stress: float , tangential_force: float , area: float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
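# Worked examples for the solver above: exactly one of the three quantities must be
# zero, and that is the one computed from the other two. The helper name is illustrative.
def _demo_shear_stress() -> None:
    assert a_(stress=25 , tangential_force=100 , area=0 ) == ('area', 4.0 )
    assert a_(stress=0 , tangential_force=1600 , area=200 ) == ('stress', 8.0 )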
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
| 0
|
"""simple docstring"""
def solution( max_perimeter: int = 10**9 ):
    '''simple docstring'''
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
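# The recurrence above walks the almost-equilateral triangles (a, a, a +/- 1) with
# integral area; the first perimeters it emits are 16 (5,5,6), 50 (17,17,16),
# 196 (65,65,66) and 722 (241,241,240), which gives a cheap small-bound check.
# The helper name is illustrative.
def _check_small_bound() -> None:
    assert solution(1000 ) == 16 + 50 + 196 + 722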
if __name__ == "__main__":
print(f'''{solution() = }''')
| 714
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
| 645
| 0
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCAmelCase_ :
lowerCamelCase__ : List[Any] = None
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ : Union[str, Any] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Any = os.path.join(a , 'feat_extract.json' )
feat_extract_first.to_json_file(a )
lowercase__ : List[str] = self.feature_extraction_class.from_json_file(a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : List[str] = feat_extract_first.save_pretrained(a )[0]
check_json_file_has_correct_format(a )
lowercase__ : Tuple = self.feature_extraction_class.from_pretrained(a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = self.feature_extraction_class()
self.assertIsNotNone(a )
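# A sketch of how a concrete suite plugs into the mixin above: together with
# unittest.TestCase it only needs `feature_extraction_class` and `feat_extract_dict`,
# the two attributes the shared tests read. The mixin and extractor names below are
# illustrative stand-ins, shown commented out because they depend on the test suite.
# class ExampleFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#     feature_extraction_class = Wav2Vec2FeatureExtractor
#     feat_extract_dict = {'feature_size': 1, 'sampling_rate': 16000, 'padding_value': 0.0}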
| 715
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple:
lowercase__ : str = load_in_abit
lowercase__ : str = load_in_abit
lowercase__ : List[str] = llm_inta_threshold
lowercase__ : Dict = llm_inta_skip_modules
lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowercase__ : Any = llm_inta_has_fpaa_weight
lowercase__ : Any = bnb_abit_quant_type
lowercase__ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__ : Dict = torch.floataa
elif isinstance(a , a ):
lowercase__ : Any = getattr(a , a )
elif isinstance(a , torch.dtype ):
lowercase__ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
    def _UpperCAmelCase ( self ) -> str:
        if not isinstance(self.llm_inta_threshold , float ):
            raise ValueError('llm_int8_threshold must be a float' )
        if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , list ):
            raise ValueError('llm_int8_skip_modules must be a list of strings' )
        if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , bool ):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
        if not isinstance(self.llm_inta_has_fpaa_weight , bool ):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
        if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
        if not isinstance(self.bnb_abit_quant_type , str ):
            raise ValueError('bnb_4bit_quant_type must be a string' )
        if not isinstance(self.bnb_abit_use_double_quant , bool ):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
            '0.39.0' ):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
    def _UpperCAmelCase ( self ) -> Tuple:
        return self.load_in_8bit or self.load_in_4bit
    def _UpperCAmelCase ( self ) -> List[str]:
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_abit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_abit_quant_type == "nf4":
            return "nf4"
        else:
            return None
@classmethod
def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]:
lowercase__ : List[Any] = cls(**a )
lowercase__ : Union[str, Any] = []
for key, value in kwargs.items():
if hasattr(a , a ):
setattr(a , a , a )
to_remove.append(a )
for key in to_remove:
kwargs.pop(a , a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCAmelCase ( self , a ) -> Dict:
with open(a , 'w' , encoding='utf-8' ) as writer:
lowercase__ : Any = self.to_dict()
lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n'
writer.write(a )
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def _UpperCAmelCase ( self , a = True ) -> str:
if use_diff is True:
lowercase__ : List[Any] = self.to_diff_dict()
else:
lowercase__ : List[str] = self.to_dict()
return json.dumps(a , indent=2 , sort_keys=a ) + "\n"
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Tuple = self.to_dict()
# get the default config dict
lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict()
lowercase__ : int = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__ : Optional[int] = value
return serializable_config_dict
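# --- Editor's illustration (not part of the original snippet) ---------------
# The diff-dict serialization above keeps only fields that differ from a
# freshly constructed default config. A minimal, self-contained sketch of the
# same idea; the tiny class and its field names are illustrative stand-ins,
# not the upstream BitsAndBytesConfig API:
from dataclasses import asdict, dataclass

@dataclass
class _TinyQuantConfig:
    load_in_4bit: bool = False
    bnb_4bit_quant_type: str = "fp4"

    def to_diff_dict(self):
        # compare against the defaults and keep only the overridden fields
        defaults = asdict(_TinyQuantConfig())
        return {k: v for k, v in asdict(self).items() if v != defaults[k]}

assert _TinyQuantConfig(load_in_4bit=True).to_diff_dict() == {"load_in_4bit": True}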
| 645
| 0
|
"""simple docstring"""
from random import randint, random
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : int = 5 , ):
'''simple docstring'''
lowercase__ : Dict = [[-1] * number_of_cells] # Create a highway without any car
lowercase__ : int = 0
lowercase__ : Any = max(_lowerCAmelCase , 0 )
while i < number_of_cells:
lowercase__ : Optional[int] = (
randint(0 , _lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def a_ ( _lowerCAmelCase : list , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[Any] = 0
lowercase__ : Tuple = highway_now[car_index + 1 :]
for cell in range(len(_lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(_lowerCAmelCase , -1 )
def a_ ( _lowerCAmelCase : list , _lowerCAmelCase : float , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[int] = len(_lowerCAmelCase )
    # Before calculations, the highway is empty
lowercase__ : Optional[Any] = [-1] * number_of_cells
for car_index in range(_lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
lowercase__ : Tuple = min(highway_now[car_index] + 1 , _lowerCAmelCase )
            # Number of empty cells before the next car
lowercase__ : Union[str, Any] = get_distance(_lowerCAmelCase , _lowerCAmelCase ) - 1
# We can't have the car causing an accident
lowercase__ : Optional[int] = min(next_highway[car_index] , _lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
lowercase__ : List[str] = max(next_highway[car_index] - 1 , 0 )
return next_highway
def a_ ( _lowerCAmelCase : list , _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[int] = len(highway[0] )
for i in range(_lowerCAmelCase ):
lowercase__ : Optional[int] = update(highway[i] , _lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Optional[int] = [-1] * number_of_cells
for car_index in range(_lowerCAmelCase ):
lowercase__ : Optional[int] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
lowercase__ : List[Any] = (car_index + speed) % number_of_cells
# Commit the change of position
lowercase__ : List[str] = speed
highway.append(_lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
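# --- Editor's illustration (not part of the original snippet) ---------------
# A hand-worked single update step (probability=0, max_speed=5) following the
# rules above: add one to each speed, cap it to the gap minus one, then move.
highway_before = [1, -1, -1, 0, -1, -1]  # cell values are speeds, -1 = empty
# car at cell 0: speed min(1 + 1, 5) = 2, capped by the two-cell gap to 1
# car at cell 3: speed min(0 + 1, 5) = 1, the wrap-around gap also allows 1
highway_after = [-1, 1, -1, -1, 1, -1]  # each car advanced by its speed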
| 716
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[int] = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : int = int(config['batch_size'] )
lowercase__ : Union[str, Any] = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
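    # When the DeepSpeed config file already declares an optimizer, Accelerate
    # expects the DummyOptim placeholder so that DeepSpeed can construct the
    # real optimizer itself; otherwise a plain AdamW is instantiated here.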
lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[int] = 0
    # We also need to keep track of the starting epoch so files are named properly
lowercase__ : Tuple = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : List[Any] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , )
lowercase__ : Any = parser.parse_args()
lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
| 0
|
"""simple docstring"""
from collections.abc import Sequence
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
lowercase__ : int = 0.0
for coeff in reversed(_lowerCAmelCase ):
lowercase__ : List[Any] = result * x + coeff
return result
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
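# --- Editor's illustration (not part of the original snippet) ---------------
# Horner's rule rewrites c0 + c1*x + ... + cn*x**n as
# (...(cn*x + c(n-1))*x + ...)*x + c0, so evaluation becomes n multiply-adds
# instead of repeated exponentiation. For the demo polynomial above:
# 5*10**2 + 9.3*10**3 + 7*10**4 = 79800.0.
from functools import reduce

def _horner_reduce(coeffs, x):
    # the same fold as the loop above, expressed with functools.reduce
    return reduce(lambda acc, c: acc * x + c, reversed(coeffs), 0.0)

assert abs(_horner_reduce((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) - 79800.0) < 1e-9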
| 717
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = [0] * len(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
lowercase__ : List[str] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase__ : Dict = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase__ : Union[str, Any] = j
return prefix_result
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
return max(prefix_function(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
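# --- Editor's illustration (not part of the original snippet) ---------------
# Worked example for the two helpers above (the KMP prefix/failure table and
# its maximum): for "aabaaab" the table is [0, 1, 0, 1, 2, 2, 3], because the
# longest proper prefix that is also a suffix of the whole string is "aab",
# of length 3.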
| 645
| 0
|
"""simple docstring"""
import os
def a_ ( ):
'''simple docstring'''
with open(os.path.dirname(_lowerCAmelCase ) + '/p022_names.txt' ) as file:
lowercase__ : Union[str, Any] = str(file.readlines()[0] )
lowercase__ : Tuple = names.replace('"' , '' ).split(',' )
names.sort()
lowercase__ : Tuple = 0
lowercase__ : Optional[Any] = 0
for i, name in enumerate(_lowerCAmelCase ):
for letter in name:
name_score += ord(_lowerCAmelCase ) - 64
total_score += (i + 1) * name_score
lowercase__ : Tuple = 0
return total_score
if __name__ == "__main__":
print(solution())
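# --- Editor's illustration (not part of the original snippet) ---------------
# Scoring sketch: ord(letter) - 64 maps 'A'..'Z' to 1..26, so "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53; as the 938th name of the sorted list it would
# contribute 938 * 53 = 49714 to the total (Project Euler problem 22).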
| 718
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
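        # do_flip_channel_order swaps the channels from RGB to BGR, the input
        # order the pretrained MobileViT checkpoints expect.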
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 645
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCamelCase : Tuple = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Optional[int] = "canine"
def __init__( self , a=7_6_8 , a=1_2 , a=1_2 , a=3_0_7_2 , a="gelu" , a=0.1 , a=0.1 , a=1_6_3_8_4 , a=1_6 , a=0.02 , a=1e-12 , a=0 , a=0XE_000 , a=0XE_001 , a=4 , a=4 , a=8 , a=1_6_3_8_4 , a=1_2_8 , **a , ) -> Optional[int]:
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
lowercase__ : str = max_position_embeddings
lowercase__ : Optional[int] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : Any = hidden_act
lowercase__ : Optional[int] = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Dict = initializer_range
lowercase__ : List[str] = type_vocab_size
lowercase__ : Union[str, Any] = layer_norm_eps
# Character config:
lowercase__ : Any = downsampling_rate
lowercase__ : List[str] = upsampling_kernel_size
lowercase__ : Optional[int] = num_hash_functions
lowercase__ : Optional[Any] = num_hash_buckets
lowercase__ : Optional[int] = local_transformer_stride
| 719
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : Optional[Any] = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[int] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720
|
"""simple docstring"""
from collections.abc import Sequence
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
lowercase__ : int = 0.0
for coeff in reversed(_lowerCAmelCase ):
lowercase__ : List[Any] = result * x + coeff
return result
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 645
| 0
|
"""simple docstring"""
from math import isqrt
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
return all(number % divisor != 0 for divisor in range(2 , isqrt(_lowerCAmelCase ) + 1 ) )
def a_ ( _lowerCAmelCase : int = 10**6 ):
'''simple docstring'''
lowercase__ : List[Any] = 0
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = 7
while prime_candidate < max_prime:
primes_count += is_prime(_lowerCAmelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
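# --- Editor's illustration (not part of the original snippet) ---------------
# The candidates are differences of consecutive cubes: (n + 1)**3 - n**3
# = 3*n**2 + 3*n + 1, i.e. 7, 19, 37, 61, ... The loop reproduces this with
# the step 6 * cube_index, since successive differences grow by 6*n + 6.
# Counting such primes below 10**6 is Project Euler problem 131 (answer 173).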
| 721
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_UpperCamelCase : Any = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def a_ ( _lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a )
lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a )
self.assertTrue(os.path.exists(a ) )
@pytest.mark.integration
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[int] = None
builder_instance.download_and_prepare()
lowercase__ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['train'] , _lowerCAmelCase )
assert next(iter(ds['train'] ) )
| 645
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : int = logging.get_logger(__name__)
_UpperCamelCase : Dict = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Optional[int] = "open-llama"
def __init__( self , a=1_0_0_0_0_0 , a=4_0_9_6 , a=1_1_0_0_8 , a=3_2 , a=3_2 , a="silu" , a=2_0_4_8 , a=0.02 , a=1e-6 , a=True , a=0 , a=1 , a=2 , a=False , a=True , a=0.1 , a=0.1 , a=True , a=True , a=None , **a , ) -> Optional[Any]:
lowercase__ : List[Any] = vocab_size
lowercase__ : Union[str, Any] = max_position_embeddings
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : str = hidden_act
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Any = rms_norm_eps
lowercase__ : List[Any] = use_cache
lowercase__ : Any = kwargs.pop(
'use_memorry_efficient_attention' , a )
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : List[str] = attention_dropout_prob
lowercase__ : Tuple = use_stable_embedding
lowercase__ : Optional[Any] = shared_input_output_embedding
lowercase__ : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=a , bos_token_id=a , eos_token_id=a , tie_word_embeddings=a , **a , )
def _UpperCAmelCase ( self ) -> str:
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f"""got {self.rope_scaling}""" )
        lowercase__ : Dict = self.rope_scaling.get('type' , None )
        lowercase__ : Union[str, Any] = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 700
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( _lowerCAmelCase : dict ):
'''simple docstring'''
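    # fetch_california_housing() yields a Bunch whose "data" holds an
    # (n_samples, 8) feature matrix and "target" the median house value of
    # each block group, in units of $100,000.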
return (data["data"], data["target"])
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
'''simple docstring'''
lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(_lowerCAmelCase , _lowerCAmelCase )
# Predict target for test data
lowercase__ : str = xgb.predict(_lowerCAmelCase )
lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 )
return predictions
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = fetch_california_housing()
lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split(
_lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 )
lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 645
| 0
|
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : str = TaConfig.from_json_file(_lowerCAmelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
lowercase__ : str = TaForConditionalGeneration(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
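# --- Editor's illustration (not part of the original snippet) ---------------
# Typical invocation; the script name and the paths are placeholders:
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/t5/model.ckpt \
#         --config_file /path/to/t5/config.json \
#         --pytorch_dump_path /path/to/pytorch_model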
| 701
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
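        # e.g. with the defaults above: num_patches_per_frame = (10 // 2)**2
        # = 25, seq_length = (2 // 2) * 25 = 25, num_masks = int(0.9 * 25) = 22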
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
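        # every masked patch is reconstructed as raw pixels: 3 colour channels
        # * tubelet_size frames * patch_size**2 positions per patch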
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]:
lowercase__ : Union[str, Any] = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase__ : Union[str, Any] = bool_masked_pos.to(a )
if return_labels:
if model_class in [
*get_values(a ),
]:
lowercase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def a_ ( ):
'''simple docstring'''
lowercase__ : int = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
lowercase__ : str = np.load(_lowerCAmelCase )
return list(_lowerCAmelCase )
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
            lowercase__ : List[Any] = torch.tensor([0.6_469] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
| 645
| 0
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=7 , a=False , a=True , a=False , a=False , a=1_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Union[str, Any]:
lowercase__ : Optional[Any] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : str = seq_length
lowercase__ : Optional[Any] = is_training
lowercase__ : int = use_input_mask
lowercase__ : List[str] = use_token_type_ids
lowercase__ : List[Any] = use_labels
lowercase__ : Any = vocab_size
lowercase__ : Optional[int] = hidden_size
lowercase__ : Tuple = num_hidden_layers
lowercase__ : Tuple = num_attention_heads
lowercase__ : str = intermediate_size
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Optional[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = max_position_embeddings
lowercase__ : str = type_vocab_size
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Tuple = num_labels
lowercase__ : Any = num_choices
lowercase__ : List[Any] = scope
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Optional[Any] = None
if self.use_input_mask:
lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Optional[int] = None
lowercase__ : Any = None
lowercase__ : Optional[int] = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Tuple = EsmConfig(
vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=a , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> str:
lowercase__ : List[str] = EsmForProteinFolding(config=a ).float()
model.to(a )
model.eval()
lowercase__ : Dict = model(a , attention_mask=a )
lowercase__ : List[str] = model(a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
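        # positions use a 14-atom-per-residue layout with xyz coordinates and
        # angles hold 7 torsion angles as (sin, cos) pairs; the leading 8 is
        # the per-block trajectory of the structure module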
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : Any = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Any = False
lowerCamelCase__ : List[Any] = (EsmForProteinFolding,) if is_torch_available() else ()
lowerCamelCase__ : List[str] = ()
lowerCamelCase__ : Optional[int] = {} if is_torch_available() else {}
lowerCamelCase__ : Optional[Any] = False
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[Any] = EsmFoldModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
@unittest.skip('Does not support attention outputs' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def _UpperCAmelCase ( self ) -> Dict:
pass
    @unittest.skip('ESMFold does not output hidden states in the normal way.' )
def _UpperCAmelCase ( self ) -> str:
pass
@unittest.skip('ESMFold only has one output format.' )
def _UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support input chunking.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def _UpperCAmelCase ( self ) -> str:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@require_torch
class UpperCAmelCase_ ( _a):
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[Any] = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
model.eval()
lowercase__ : str = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
lowercase__ : Union[str, Any] = model(a )['positions']
lowercase__ : Any = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , a , atol=1e-4 ) )
| 702
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
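# The keys listed above live at the top level of the HF model, so they are mapped without the `unispeech_sat.` prefix.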
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
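    # Walk the dotted key to reach the target parameter on the HF model before checking shapes and assigning.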
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
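                    # Mapped keys containing '*' are per-layer templates; the wildcard is replaced with the layer index parsed from the fairseq name.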
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
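    # type_id 0 addresses the conv weight/bias; type_id 2 addresses the norm parameters, which only exist on layer 0 when group norm is used.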
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
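    # Load the fairseq checkpoint; arg_overrides points the task's `data` field at the directory containing the dict file.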
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 645
| 0
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=2_4 , a=2 , a=6 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=None , a=1_0_0_0 , ) -> Dict:
lowercase__ : Optional[int] = parent
lowercase__ : List[str] = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : List[str] = is_training
lowercase__ : Optional[int] = use_input_mask
lowercase__ : List[Any] = use_token_type_ids
lowercase__ : Any = use_labels
lowercase__ : Union[str, Any] = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Tuple = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Dict = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : int = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[str] = initializer_range
lowercase__ : Dict = num_labels
lowercase__ : Any = scope
lowercase__ : Dict = range_bbox
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
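        # Boxes are (x0, y0, x1, y1); swap coordinates where needed so that x0 <= x1 and y0 <= y1.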
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase__ : List[Any] = bbox[i, j, 3]
lowercase__ : str = bbox[i, j, 1]
lowercase__ : Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase__ : Optional[int] = bbox[i, j, 2]
lowercase__ : Dict = bbox[i, j, 0]
lowercase__ : Tuple = t
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowercase__ : int = None
if self.use_token_type_ids:
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Optional[int] = None
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : str = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def _UpperCAmelCase ( self ) -> Optional[Any]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a , ) -> Dict:
lowercase__ : Union[str, Any] = LiltModel(config=a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , bbox=a , attention_mask=a , token_type_ids=a )
lowercase__ : Optional[int] = model(a , bbox=a , token_type_ids=a )
lowercase__ : List[str] = model(a , bbox=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a , ) -> Tuple:
lowercase__ : Union[str, Any] = self.num_labels
lowercase__ : List[str] = LiltForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(
a , bbox=a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a , a , ) -> Optional[int]:
lowercase__ : List[str] = LiltForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(
a , bbox=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : Dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : str = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
def _UpperCAmelCase ( self , a , a , a , a , a ) -> str:
return True
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[str] = LiltModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ : List[str] = type
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = LiltModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_torch
@slow
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(a )
lowercase__ : Any = torch.tensor([[1, 2]] , device=a )
lowercase__ : Union[str, Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a )
# forward pass
with torch.no_grad():
lowercase__ : Tuple = model(input_ids=a , bbox=a )
lowercase__ : str = torch.Size([1, 2, 7_6_8] )
lowercase__ : str = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=a , )
self.assertTrue(outputs.last_hidden_state.shape , a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a , atol=1e-3 ) )
| 703
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
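        # Each of the len(depths) - 1 patch-merging stages reduces the token count 4x and doubles the channel dim, giving the expected shape below.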
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
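        # Pad each spatial dimension up to a multiple of the patch size, since the test image size is deliberately not divisible by it.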
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a ):
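            # NaNs never compare equal to themselves, so zero them out before comparing tuple and dict outputs with allclose.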
lowercase__ : Union[str, Any] = 0
return t
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
| 645
| 0
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def a_ ( _lowerCAmelCase : List[Any] ):
'''simple docstring'''
return choice(_lowerCAmelCase )
def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[Any] = random_pivot(_lowerCAmelCase )
# partition based on pivot
# linear time
lowercase__ : Optional[int] = [e for e in lst if e < pivot]
lowercase__ : str = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(_lowerCAmelCase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(_lowerCAmelCase ) < k - 1:
return kth_number(_lowerCAmelCase , k - len(_lowerCAmelCase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(_lowerCAmelCase , _lowerCAmelCase )
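# A quick sketch of the expected behaviour (k is 1-indexed; the partition drops
# duplicates of the pivot, so the function assumes distinct elements):
# kth_number([2, 1, 3, 4, 5], 3) returns 3, the third-smallest value,
# in expected linear time.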
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
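# Worked example for n = 10: the sum of the squares is 385 and the square of
# the sum is 55**2 = 3025, so the function returns 3025 - 385 = 2640.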
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
| 0
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def a_ ( _lowerCAmelCase : Namespace ):
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_UpperCamelCase : List[Any] = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class UpperCAmelCase_ ( _a):
@staticmethod
def _UpperCAmelCase ( a ) -> Tuple:
lowercase__ : Dict = parser.add_parser(
'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , )
train_parser.add_argument('--model_type' , type=a , required=a , help='Model\'s type.' )
train_parser.add_argument(
'--tf_checkpoint' , type=a , required=a , help='TensorFlow checkpoint path or folder.' )
train_parser.add_argument(
'--pytorch_dump_output' , type=a , required=a , help='Path to the PyTorch saved model output.' )
train_parser.add_argument('--config' , type=a , default='' , help='Configuration file path or folder.' )
train_parser.add_argument(
'--finetuning_task_name' , type=a , default=a , help='Optional fine-tuning task name if the TF model was a finetuned model.' , )
train_parser.set_defaults(func=a )
def __init__( self , a , a , a , a , a , *a , ) -> Union[str, Any]:
lowercase__ : List[str] = logging.get_logger('transformers-cli/converting' )
self._logger.info(f"""Loading model {model_type}""" )
lowercase__ : int = model_type
lowercase__ : Dict = tf_checkpoint
lowercase__ : Union[str, Any] = pytorch_dump_output
lowercase__ : int = config
lowercase__ : str = finetuning_task_name
def _UpperCAmelCase ( self ) -> Dict:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a )
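            # A path containing "ckpt" is interpreted as a raw TensorFlow checkpoint; any other path is treated as a pickled dataset file.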
if "ckpt" in self._tf_checkpoint.lower():
lowercase__ : Optional[int] = self._tf_checkpoint
lowercase__ : int = ''
else:
lowercase__ : Optional[int] = self._tf_checkpoint
lowercase__ : List[str] = ''
convert_transfo_xl_checkpoint_to_pytorch(
a , self._config , self._pytorch_dump_output , a )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]' )
| 705
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
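        # Replicate the params on every device and shard the per-device inputs so the jitted pipeline runs data-parallel under pmap.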
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 645
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase : int = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
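# Torch-dependent symbols are registered lazily below so the package can be imported without torch installed.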
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 1
lowercase__ : List[str] = 3
lowercase__ : List[Any] = (3_2, 3_2)
lowercase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a )
return image
@property
def _UpperCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
lowercase__ : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
return model
@property
def _UpperCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
lowercase__ : Dict = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def _UpperCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
lowercase__ : Dict = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(a )
@property
def _UpperCAmelCase ( self ) -> Any:
def extract(*a , **a ):
class UpperCAmelCase_ :
def __init__( self ) -> Optional[Any]:
lowercase__ : Optional[Any] = torch.ones([0] )
def _UpperCAmelCase ( self , a ) -> str:
self.pixel_values.to(a )
return self
return Out()
return extract
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase__ : str = self.dummy_cond_unet
lowercase__ : Any = PNDMScheduler(skip_prk_steps=a )
lowercase__ : str = self.dummy_vae
lowercase__ : Union[str, Any] = self.dummy_text_encoder
lowercase__ : Optional[int] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
lowercase__ : Any = 7_7
lowercase__ : List[Any] = self.dummy_image.to(a )
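        # Rescale the random dummy tensor into a valid [0, 1] image range before passing it to the img2img pipeline.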
lowercase__ : Dict = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase__ : Optional[int] = AltDiffusionImgaImgPipeline(
unet=a , scheduler=a , vae=a , text_encoder=a , tokenizer=a , safety_checker=a , feature_extractor=self.dummy_extractor , )
lowercase__ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=a )
lowercase__ : Tuple = alt_pipe.to(a )
alt_pipe.set_progress_bar_config(disable=a )
lowercase__ : Tuple = 'A painting of a squirrel eating a burger'
lowercase__ : str = torch.Generator(device=a ).manual_seed(0 )
lowercase__ : List[Any] = alt_pipe(
[prompt] , generator=a , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=a , )
lowercase__ : int = output.images
lowercase__ : List[Any] = torch.Generator(device=a ).manual_seed(0 )
lowercase__ : Dict = alt_pipe(
[prompt] , generator=a , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=a , return_dict=a , )[0]
lowercase__ : Dict = image[0, -3:, -3:, -1]
lowercase__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowercase__ : List[str] = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.dummy_cond_unet
lowercase__ : Optional[Any] = PNDMScheduler(skip_prk_steps=a )
lowercase__ : Dict = self.dummy_vae
lowercase__ : List[Any] = self.dummy_text_encoder
lowercase__ : Optional[int] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
lowercase__ : int = 7_7
lowercase__ : str = self.dummy_image.to(a )
# put models in fp16
lowercase__ : int = unet.half()
lowercase__ : List[str] = vae.half()
lowercase__ : Dict = bert.half()
# make sure here that pndm scheduler skips prk
lowercase__ : List[Any] = AltDiffusionImgaImgPipeline(
unet=a , scheduler=a , vae=a , text_encoder=a , tokenizer=a , safety_checker=a , feature_extractor=self.dummy_extractor , )
lowercase__ : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=a )
lowercase__ : Union[str, Any] = alt_pipe.to(a )
alt_pipe.set_progress_bar_config(disable=a )
lowercase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : List[str] = alt_pipe(
[prompt] , generator=a , num_inference_steps=2 , output_type='np' , image=a , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase__ : Optional[Any] = init_image.resize((7_6_0, 5_0_4) )
lowercase__ : Tuple = 'BAAI/AltDiffusion'
lowercase__ : Tuple = AltDiffusionImgaImgPipeline.from_pretrained(
a , safety_checker=a , )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
lowercase__ : str = 'A fantasy landscape, trending on artstation'
lowercase__ : Dict = torch.manual_seed(0 )
lowercase__ : List[str] = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , generator=a , output_type='np' , )
lowercase__ : Optional[Any] = output.images[0]
lowercase__ : Optional[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowercase__ : int = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase__ : Tuple = init_image.resize((7_6_8, 5_1_2) )
lowercase__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
lowercase__ : Union[str, Any] = 'BAAI/AltDiffusion'
lowercase__ : Union[str, Any] = AltDiffusionImgaImgPipeline.from_pretrained(
a , safety_checker=a , )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
lowercase__ : Optional[Any] = 'A fantasy landscape, trending on artstation'
lowercase__ : Union[str, Any] = torch.manual_seed(0 )
lowercase__ : Union[str, Any] = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , generator=a , output_type='np' , )
lowercase__ : Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
        # img2img is flaky across GPUs even in fp32, so we compare the maximum absolute error here
assert np.abs(expected_image - image ).max() < 1e-2
| 707
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
| 645
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : str = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Dict = "swin2sr"
lowerCamelCase__ : Any = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
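    # attribute_map lets generic code read embed_dim / num_heads / num_layers under the standard config attribute names.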
def __init__( self , a=6_4 , a=1 , a=3 , a=1_8_0 , a=[6, 6, 6, 6, 6, 6] , a=[6, 6, 6, 6, 6, 6] , a=8 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=0.02 , a=1e-5 , a=2 , a=1.0 , a="1conv" , a="pixelshuffle" , **a , ) -> Optional[Any]:
super().__init__(**a )
lowercase__ : str = image_size
lowercase__ : Tuple = patch_size
lowercase__ : List[Any] = num_channels
lowercase__ : Tuple = embed_dim
lowercase__ : int = depths
lowercase__ : int = len(a )
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : Dict = mlp_ratio
lowercase__ : List[Any] = qkv_bias
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Optional[int] = drop_path_rate
lowercase__ : int = hidden_act
lowercase__ : Any = use_absolute_embeddings
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : str = initializer_range
lowercase__ : List[Any] = upscale
lowercase__ : List[Any] = img_range
lowercase__ : str = resi_connection
lowercase__ : Any = upsampler
| 708
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def a_ ( image : Image ):
'''simple docstring'''
lowercase__ : List[str] = hashlib.md5(image.tobytes() )
return lowercase__.hexdigest()
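# Illustrative usage sketch (added; not part of the original test file): the
# helper above fingerprints a PIL image by hashing its raw bytes, so tests can
# compare pipeline outputs by digest rather than pixel-by-pixel.
if __name__ == '__main__' and is_vision_available():
    _red = Image.new('RGB', (4, 4), (255, 0, 0))
    assert a_(_red) == a_(_red.copy())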
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# It is highly irregular to have no small tests.
self.skipTest('There is no hf-internal-testing tiny model for either GLPN or DPT' )
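# Illustrative sketch (added; not part of the original tests): minimal consumer-side
# usage of the pipeline under test. Assumes torch, transformers and Pillow are
# installed; downloads the Intel/dpt-large weights on first run.
if __name__ == '__main__':
    from transformers import pipeline

    depth_estimator = pipeline('depth-estimation', model='Intel/dpt-large')
    outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
    outputs['depth'].save('depth.png')  # 'depth' is a PIL image; 'predicted_depth' is a torch.Tensor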
| 645
| 0
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
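# Illustrative sketch (added; not part of the original module): the guarded imports
# above gate each scheduler on its optional dependency. A typical consumer swaps
# schedulers on a pipeline via from_config. Assumes diffusers and torch are
# installed; the checkpoint download is several GB.
if __name__ == '__main__':
    from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

    pipe = DiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)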
| 709
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# it's set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]:
self.assertEqual(len(a ) , len(a ) )
# Order doesn't matter
lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
for cba, cba in zip(a , a ):
if isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(a , a )
elif isinstance(a , a ) and not isinstance(a , a ):
self.assertEqual(a , cba.__class__ )
elif not isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(cba.__class__ , a )
else:
self.assertEqual(a , a )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
| 645
| 0
|
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_UpperCamelCase : Tuple ="\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_UpperCamelCase : List[Any] ="\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
_UpperCamelCase : int ="\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
def _UpperCAmelCase ( self , a , a , a=None ) -> str:
return {
"matthews_correlation": float(matthews_corrcoef(a , a , sample_weight=a ) ),
}
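# Illustrative sketch (added; not part of the original metric): the _compute above
# is a thin wrapper around sklearn. The same computation stand-alone, reproducing
# Example 1 from the docstring (assumes scikit-learn is installed):
if __name__ == '__main__':
    print(round(matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2))  # 0.54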
| 710
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
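# Illustrative sketch (added; not part of the original module): _LazyModule defers
# the heavy torch/flax imports until an attribute is first accessed, so from the
# consumer side the module behaves like an eagerly-imported one (assumes
# transformers is installed):
if __name__ == '__main__':
    from transformers import GPTNeoConfig

    config = GPTNeoConfig()   # module contents resolved lazily on first access
    print(config.model_type)  # 'gpt_neo'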
| 645
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_UpperCamelCase : Any = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : List[Any] = "upernet"
def __init__( self , a=None , a=5_1_2 , a=0.02 , a=[1, 2, 3, 6] , a=True , a=0.4 , a=3_8_4 , a=2_5_6 , a=1 , a=False , a=2_5_5 , **a , ) -> List[Any]:
super().__init__(**a )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowercase__ : Tuple = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(a , a ):
lowercase__ : Any = backbone_config.get('model_type' )
lowercase__ : Any = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(a )
lowercase__ : Optional[Any] = backbone_config
lowercase__ : Any = hidden_size
lowercase__ : Dict = initializer_range
lowercase__ : Tuple = pool_scales
lowercase__ : List[Any] = use_auxiliary_head
lowercase__ : Any = auxiliary_loss_weight
lowercase__ : List[Any] = auxiliary_in_channels
lowercase__ : Any = auxiliary_channels
lowercase__ : str = auxiliary_num_convs
lowercase__ : Tuple = auxiliary_concat_input
lowercase__ : List[Any] = loss_ignore_index
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = copy.deepcopy(self.__dict__ )
lowercase__ : Union[str, Any] = self.backbone_config.to_dict()
lowercase__ : Optional[Any] = self.__class__.model_type
return output
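# Illustrative sketch (added; not part of the original file): this class mirrors
# `transformers.UperNetConfig` (model type 'upernet'), which nests a backbone
# config and defaults to ResNet, as the __init__ above shows. Assuming the
# transformers package is installed:
if __name__ == '__main__':
    from transformers import UperNetConfig

    config = UperNetConfig()  # no backbone_config -> default ResNet backbone
    print(config.backbone_config.model_type)  # 'resnet'
    print(config.to_dict()['model_type'])     # 'upernet'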
| 711
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
| 645
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCamelCase : int = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : int = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
_UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 712
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 0
|
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=2 , a=5_6 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=2 , a=2 , a=7 , a="gelu_new" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , a="block_sparse" , a=True , a=False , a=2 , a=3 , ) -> int:
lowercase__ : Tuple = parent
lowercase__ : Tuple = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : Optional[Any] = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Union[str, Any] = use_token_type_ids
lowercase__ : Dict = use_labels
lowercase__ : List[Any] = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : List[Any] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Optional[int] = num_choices
lowercase__ : List[Any] = rescale_embeddings
lowercase__ : Any = attention_type
lowercase__ : Optional[int] = use_bias
lowercase__ : Optional[Any] = block_size
lowercase__ : Any = num_random_blocks
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : List[str] = None
if self.use_attention_mask:
lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Union[str, Any] = None
if self.use_token_type_ids:
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : List[Any] = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ : Dict = config_and_inputs
lowercase__ : Optional[Any] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Any = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : Dict = False
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ) -> List[str]:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ) -> Optional[int]:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ) -> List[Any]:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ) -> str:
super().test_hidden_states_output()
@slow
def _UpperCAmelCase ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
lowercase__ : int = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Any:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ : Optional[int] = self._prepare_for_class(a , a )
lowercase__ : List[str] = model_class(a )
@jax.jit
def model_jitted(a , a=None , **a ):
return model(input_ids=a , attention_mask=a , **a )
with self.subTest('JIT Enabled' ):
lowercase__ : Optional[Any] = model_jitted(**a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowercase__ : List[str] = model_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
def _UpperCAmelCase ( self , a , a , a , a=1e-5 , a="outputs" , a=None ) -> Optional[Any]:
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while the PyTorch version
# makes an effort to return `attention_probs` (yet to be verified).
if name.startswith('outputs.attentions' ):
return
else:
super().check_pt_flax_outputs(a , a , a , a , a , a )
| 713
|
"""simple docstring"""
from __future__ import annotations
def a_ ( stress : float , tangential_force : float , area : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
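# Worked example (added for illustration): pass 0 for the unknown quantity and the
# function solves shear stress = tangential_force / area for it. With a tangential
# force of 25 N over an area of 5 m^2, the stress is 25 / 5 = 5 Pa:
#
#   a_(stress=0, tangential_force=25, area=5)    # -> ('stress', 5.0)
#   a_(stress=5, tangential_force=0, area=5)     # -> ('tangential_force', 25)
#   a_(stress=5, tangential_force=25, area=0)    # -> ('area', 5.0)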
| 645
| 0
|
"""simple docstring"""
from __future__ import annotations
class Matrix:
def __init__( self , a ) -> Dict:
lowercase__ : Optional[Any] = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.' )
if len(a ) != 0:
lowercase__ : Optional[int] = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(a ) != cols:
raise error
for value in row:
if not isinstance(a , (int, float) ):
raise error
lowercase__ : Tuple = rows
else:
lowercase__ : int = []
def _UpperCAmelCase ( self ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def _UpperCAmelCase ( self ) -> int:
return len(self.rows )
@property
def _UpperCAmelCase ( self ) -> int:
return len(self.rows[0] )
@property
def _UpperCAmelCase ( self ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def _UpperCAmelCase ( self ) -> bool:
return self.order[0] == self.order[1]
def _UpperCAmelCase ( self ) -> Matrix:
lowercase__ : Any = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(a )
def _UpperCAmelCase ( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def _UpperCAmelCase ( self ) -> bool:
return bool(self.determinant() )
def _UpperCAmelCase ( self , a , a ) -> int:
lowercase__ : Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(a ).determinant()
def _UpperCAmelCase ( self , a , a ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(a , a )
return -1 * self.get_minor(a , a )
def _UpperCAmelCase ( self ) -> Matrix:
return Matrix(
[
[self.get_minor(a , a ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def _UpperCAmelCase ( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def _UpperCAmelCase ( self ) -> Matrix:
lowercase__ : Optional[int] = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(a )
def _UpperCAmelCase ( self ) -> Matrix:
lowercase__ : List[Any] = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
return str(self.rows )
def __str__( self ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(a ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
def _UpperCAmelCase ( self , a , a = None ) -> None:
lowercase__ : str = TypeError('Row must be a list containing all ints and/or floats' )
if not isinstance(a , a ):
raise type_error
for value in row:
if not isinstance(a , (int, float) ):
raise type_error
if len(a ) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix' )
if position is None:
self.rows.append(a )
else:
lowercase__ : int = self.rows[0:position] + [row] + self.rows[position:]
def _UpperCAmelCase ( self , a , a = None ) -> None:
lowercase__ : Union[str, Any] = TypeError(
'Column must be a list containing all ints and/or floats' )
if not isinstance(a , a ):
raise type_error
for value in column:
if not isinstance(a , (int, float) ):
raise type_error
if len(a ) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix' )
if position is None:
lowercase__ : Dict = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
lowercase__ : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , a ) -> bool:
if not isinstance(a , a ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , a ) -> bool:
return not self == other
def __neg__( self ) -> Matrix:
return self * -1
def __add__( self , a ) -> Matrix:
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , a ) -> Matrix:
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , a ) -> Matrix:
if isinstance(a , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(a , a ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
[Matrix.dot_product(a , a ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
def __pow__( self , a ) -> Matrix:
if not isinstance(a , a ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
lowercase__ : Tuple = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def _UpperCAmelCase ( cls , a , a ) -> int:
return sum(row[i] * column[i] for i in range(len(a ) ) )
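# Illustrative sketch (added; not part of the original class): the determinant
# above uses Laplace (cofactor) expansion along the first row. A stand-alone
# recursive version of the same idea, for reference:
def _det(rows: list[list[float]]) -> float:
    if len(rows) == 1:
        return rows[0][0]
    return sum(
        (-1) ** col * rows[0][col] * _det([r[:col] + r[col + 1 :] for r in rows[1:]])
        for col in range(len(rows))
    )

assert _det([[1, 2], [3, 4]]) == -2  # same value as the cofactor expansion above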
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
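# e.g. with the defaults above (image_size=[30, 30], patch_size=2, num_detection_tokens=10):
# num_patches = (30 // 2) * (30 // 2) = 225, so expected_seq_len = 225 + 1 + 10 = 236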
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class YolosModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    # common tests that do not apply to YOLOS are disabled
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size ):
                    target = {}
                    target['class_labels'] = torch.ones(
                        size=(self.model_tester.n_targets,) , device=torch_device , dtype=torch.long )
                    target['boxes'] = torch.ones(
                        self.model_tester.n_targets , 4 , device=torch_device , dtype=torch.float )
                    labels.append(target )
                inputs_dict['labels'] = labels
        return inputs_dict
    def setUp( self ):
        self.model_tester = YolosModelTester(self )
        self.config_tester = ConfigTester(self , config_class=YolosConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_inputs_embeds( self ):
        # YOLOS does not use inputs_embeds
        pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_object_detection( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    # COCO sample image used by the integration test below
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest( unittest.TestCase):
    @cached_property
    def default_image_processor( self ):
        return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None

    @slow
    def test_inference_object_detection_head( self ):
        model = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values )
        # verify outputs
        expected_shape = torch.Size((1, 100, 92) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=torch_device , )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice_logits , atol=1e-4 ) )
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 ) )
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(torch_device )
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(torch_device )
        self.assertEqual(len(results['scores'] ) , 5 )
        self.assertTrue(torch.allclose(results['scores'] , expected_scores , atol=1e-4 ) )
        self.assertSequenceEqual(results['labels'].tolist() , expected_labels )
        self.assertTrue(torch.allclose(results['boxes'][0, :] , expected_slice_boxes ) )
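# A minimal inference sketch distilled from the integration test above (illustrative only,
# not part of the test suite). It assumes the public 'hustvl/yolos-small' checkpoint and the
# transformers / PIL / torch APIs already exercised in this file.
def yolos_detection_sketch(image_path ):
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, YolosForObjectDetection

    image = Image.open(image_path )
    processor = AutoImageProcessor.from_pretrained('hustvl/yolos-small' )
    model = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' )
    inputs = processor(images=image , return_tensors='pt' )
    with torch.no_grad():
        outputs = model(**inputs )
    # rescale boxes to pixel coordinates and keep detections scoring above 0.9
    results = processor.post_process_object_detection(
        outputs , threshold=0.9 , target_sizes=[image.size[::-1]] )[0]
    for score, label, box in zip(results['scores'] , results['labels'] , results['boxes'] ):
        print(f"{model.config.id2label[label.item()]}: {score.item():.3f} at {box.tolist()}" )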
"""simple docstring"""
import functools
from typing import Any
def word_break( string : str , words : list[str] ) -> bool:
    """
    Return True if `string` can be segmented into a sequence of words from `words`.

    >>> word_break('applepenapple', ['apple', 'pen'])
    True
    >>> word_break('catsandog', ['cats', 'dog', 'sand', 'and', 'cat'])
    False
    """
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError('the string should be not empty string' )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError('the words should be a list of non-empty strings' )
    # Build trie; the sentinel key marks the end of a complete word
    trie : dict[str, Any] = {}
    word_keeper_key : str = 'WORD_KEEPER'
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method: memoised check that string[index:] is segmentable
    @functools.cache
    def is_breakable(index : int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
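# Worked trace (hypothetical inputs): for string 'leetcode' and words ['leet', 'code'],
# is_breakable(0) walks the trie through 'l', 'e', 'e', 't', finds the WORD_KEEPER sentinel
# at i == 3 and recurses into is_breakable(4), which matches 'code' and reaches index == 8 -> True.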
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig :
    def __init__( self , load_in_8bit=False , load_in_4bit=False , llm_int8_threshold=6.0 , llm_int8_skip_modules=None , llm_int8_enable_fp32_cpu_offload=False , llm_int8_has_fp16_weight=False , bnb_4bit_compute_dtype=None , bnb_4bit_quant_type="fp4" , bnb_4bit_use_double_quant=False , **kwargs , ) -> None:
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str ):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype )
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype ):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
        self.post_init()
    def post_init( self ) -> None:
        if not isinstance(self.llm_int8_threshold , float ):
            raise ValueError('llm_int8_threshold must be a float' )
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list ):
            raise ValueError('llm_int8_skip_modules must be a list of strings' )
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool ):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
        if not isinstance(self.llm_int8_has_fp16_weight , bool ):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype ):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
        if not isinstance(self.bnb_4bit_quant_type , str ):
            raise ValueError('bnb_4bit_quant_type must be a string' )
        if not isinstance(self.bnb_4bit_use_double_quant , bool ):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
            '0.39.0' ):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
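    # For example (hypothetical call): BitsAndBytesConfig(llm_int8_threshold=6) raises a
    # ValueError above, because the integer 6 is not a float; pass 6.0 instead.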
    def is_quantizable( self ) -> bool:
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method( self ):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
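    # For example (hypothetical configs): load_in_8bit=True maps to 'llm_int8';
    # load_in_4bit=True with the default quant type maps to 'fp4'; neither flag set gives None.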
    @classmethod
    def from_dict( cls , config_dict , return_unused_kwargs , **kwargs ):
        config = cls(**config_dict )
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key ):
                setattr(config , key , value )
                to_remove.append(key )
        for key in to_remove:
            kwargs.pop(key , None )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
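    # For example (hypothetical call):
    # config, unused = BitsAndBytesConfig.from_dict({'load_in_8bit': True}, True, foo=1)
    # sets load_in_8bit=True and hands back {'foo': 1}, since the config has no 'foo' attribute.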
    def to_json_file( self , json_file_path ) -> None:
        with open(json_file_path , 'w' , encoding='utf-8' ) as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True ) + '\n'
            writer.write(json_string )
    def to_dict( self ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        # serialize the compute dtype as its short name, e.g. torch.float32 -> 'float32'
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
        return output
    def __repr__( self ) -> str:
        return f"""{self.__class__.__name__} {self.to_json_string()}"""
    def to_json_string( self , use_diff = True ) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"
    def to_diff_dict( self ) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
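# A minimal usage sketch (illustrative, not part of the original module): it builds a 4-bit
# NF4 config and exercises the helpers defined above. Running it requires torch and, for the
# version check in post_init, an installed bitsandbytes >= 0.39.0.
if __name__ == "__main__":
    quantization_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype="bfloat16",
        bnb_4bit_use_double_quant=True,
    )
    assert quantization_config.is_quantizable()
    assert quantization_config.quantization_method() == "nf4"
    # to_json_string(use_diff=True) serializes only fields that differ from the defaults
    print(quantization_config.to_json_string())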