import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    Pipeline,
    ZeroShotClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY

# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )
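
# Added usage sketch (illustrative; not part of the original test file). With
# `multi_label=True`, each candidate label is scored independently against the
# entailment class, so the scores no longer sum to 1; that is exactly what the
# slow tests above assert.
if __name__ == "__main__":
    classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    result = classifier(
        "Who are you voting for in 2020?",
        candidate_labels=["politics", "public health", "science"],
        multi_label=True,
    )
    print(result["labels"], result["scores"])  # labels sorted by descending score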
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        """Creates a TFGPT2Tokenizer from an existing `GPT2Tokenizer`."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """Creates a TFGPT2Tokenizer from a pretrained GPT-2 tokenizer."""
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
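
# Added usage sketch (illustrative): building the in-graph tokenizer from the
# standard "gpt2" checkpoint. Requires the optional `keras_nlp` and
# `tensorflow_text` dependencies.
if __name__ == "__main__":
    dummy = tf.constant(["Hello world", "A slightly longer sentence"])
    in_graph_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
    outputs = in_graph_tokenizer(dummy)
    # Without pad_token_id/max_length the ids stay ragged; pass both to get dense tensors.
    print(outputs["input_ids"])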
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
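
# Added inference sketch (illustrative): running the pipeline outside the test
# harness. Needs a CUDA GPU and downloads the full checkpoint.
if __name__ == "__main__":
    pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
        "shi-labs/versatile-diffusion", torch_dtype=torch.float16
    ).to("cuda")
    image = pipe("A painting of a squirrel eating a burger", num_inference_steps=50).images[0]
    image.save("squirrel.png")  # default output_type is PIL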
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
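
# Added usage sketch (illustrative): driving the processor with a pretrained
# checkpoint instead of the temporary test assets. The checkpoint name is an
# assumption, not taken from this file.
if __name__ == "__main__":
    from transformers import ChineseCLIPProcessor

    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    inputs = processor(
        text="Alexandra,T-shirt的价格是15便士。",
        images=Image.new("RGB", (224, 224)),
        return_tensors="pt",
    )
    print(list(inputs.keys()))  # input_ids, token_type_ids, attention_mask, pixel_values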
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    """
    Constructs a TVLT processor which wraps a TVLT image processor and a TVLT feature extractor into a single
    processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """Forwards `images` to the image processor and `audio` to the feature extractor, merging the results."""
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
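
# Added usage sketch (illustrative; the checkpoint name and the input shapes
# are assumptions, not taken from this file).
if __name__ == "__main__":
    import numpy as np

    from transformers import TvltProcessor

    processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
    video = list(np.random.rand(8, 3, 224, 224))  # 8 RGB frames
    audio = np.random.rand(10000)  # mono waveform samples
    batch = processor(images=video, audio=audio, sampling_rate=44100, return_tensors="pt")
    print(list(batch.keys()))  # audio features merged with pixel values, per __call__ above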
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
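
# Added usage sketch (illustrative): a small randomly initialized model from a
# custom config; the attribute_map above aliases `max_position_embeddings`.
if __name__ == "__main__":
    from transformers import RwkvModel

    config = RwkvConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2, context_length=256)
    model = RwkvModel(config)  # random weights; use from_pretrained(...) for trained ones
    print(config.max_position_embeddings)  # 256, resolved through attribute_map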
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
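
# Added usage sketch (illustrative): `out_features` is validated against the
# stage names generated in the constructor by the backbone helper above.
if __name__ == "__main__":
    config = ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic", out_features=["stage2", "stage4"])
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(config.out_features)  # ['stage2', 'stage4']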
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """
    Root-mean-square speed from kinetic theory: v_rms = sqrt(3 * R * T / M),
    with the temperature in kelvin and the molar mass in kg/mol.
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: nitrogen (N2) has a molar mass of 0.028 kg/mol
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
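    # Added sanity check (illustrative): oxygen (O2, M = 0.032 kg/mol) at the
    # same temperature should be slower by a factor of sqrt(28/32).
    vrms_o2 = rms_speed_of_molecule(300, 0.032)
    print(f"Vrms of Oxygen gas at 300 K is {vrms_o2:.1f} m/s")  # ~483.6 m/s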
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}


class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
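
# Added usage sketch (illustrative): the attribute_map above lets generic code
# read `hidden_size`/`num_hidden_layers` through the XLM-specific names.
if __name__ == "__main__":
    from transformers import XLMModel

    config = XLMConfig(vocab_size=1000, emb_dim=128, n_layers=2, n_heads=4)
    model = XLMModel(config)  # randomly initialized
    print(config.hidden_size, config.num_hidden_layers)  # 128 2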
import json
import multiprocessing
import os
import re
from collections import defaultdict

import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList


EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are sent sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
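
# Added invocation sketch (illustrative). The flags mirror the `args.*` fields
# used above, but their exact definitions live in the external
# `arguments.HumanEvalArguments` dataclass, so treat the names as assumptions:
#
#   accelerate launch human_eval.py \
#       --model_ckpt codeparrot/codeparrot-small \
#       --do_sample True --temperature 0.2 --top_p 0.95 --top_k 0 \
#       --n_samples 200 --batch_size 10 \
#       --HF_ALLOW_CODE_EVAL 1 --output_file eval_results.json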
import operator
def _A (__a , __a = False , __a = None ) -> list:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = operator.lt if reverse else operator.gt
SCREAMING_SNAKE_CASE_ : List[Any] = solution or []
if not arr:
return solution
SCREAMING_SNAKE_CASE_ : Tuple = [arr.pop(0 )]
for i, item in enumerate(__a ):
if _operator(__a , sublist[-1] ):
sublist.append(__a )
arr.pop(__a )
# merging sublist into solution list
if not solution:
solution.extend(__a )
else:
while sublist:
SCREAMING_SNAKE_CASE_ : List[Any] = sublist.pop(0 )
for i, xx in enumerate(__a ):
if not _operator(__a , __a ):
solution.insert(__a , __a )
break
else:
solution.append(__a )
strand_sort(__a , __a , __a )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
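    # One more illustrative check: duplicate values survive the merge step intact
    assert strand_sort([1, 3, 3, 2]) == [1, 2, 3, 3]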
| 318
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "SpeechT5FeatureExtractor"
__UpperCamelCase = "SpeechT5Tokenizer"
def __init__( self : Any , lowercase_ : Dict , lowercase_ : Optional[Any]):
'''simple docstring'''
super().__init__(lowercase_ , lowercase_)
def __call__( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''audio''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('''text''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('''text_target''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''audio_target''' , lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''sampling_rate''' , lowercase_)
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')
if audio is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_)
elif text is not None:
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(lowercase_ , **lowercase_)
else:
SCREAMING_SNAKE_CASE_ : Any = None
if audio_target is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor(audio_target=lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = targets['''input_values''']
elif text_target is not None:
SCREAMING_SNAKE_CASE_ : int = self.tokenizer(lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = targets['''input_ids''']
else:
SCREAMING_SNAKE_CASE_ : int = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE_ : Any = decoder_attention_mask
return inputs
def _SCREAMING_SNAKE_CASE ( self : Tuple , *lowercase_ : Tuple , **lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''input_values''' , lowercase_)
SCREAMING_SNAKE_CASE_ : int = kwargs.pop('''input_ids''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''labels''' , lowercase_)
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''')
if input_values is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_)
elif input_ids is not None:
SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer.pad(lowercase_ , **lowercase_)
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
if labels is not None:
if "input_ids" in labels or (isinstance(lowercase_ , lowercase_) and "input_ids" in labels[0]):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer.pad(lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = targets['''input_ids''']
else:
SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.feature_size
SCREAMING_SNAKE_CASE_ : Optional[int] = self.feature_extractor.num_mel_bins
SCREAMING_SNAKE_CASE_ : str = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : str = feature_size_hack
SCREAMING_SNAKE_CASE_ : Dict = targets['''input_values''']
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE_ : Dict = labels
SCREAMING_SNAKE_CASE_ : List[str] = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = decoder_attention_mask
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : Tuple):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowercase_ : Dict , **lowercase_ : List[Any]):
'''simple docstring'''
return self.tokenizer.decode(*lowercase_ , **lowercase_)
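# Usage sketch (hedged: this mirrors how a SpeechT5-style processor is
# typically driven; the 16 kHz sampling rate below is an assumption):
#
#     # ASR direction: raw audio in, token ids as labels
#     inputs = processor(audio=waveform, sampling_rate=16_000, text_target=transcript)
#     # TTS direction: text in, spectrogram values as labels
#     inputs = processor(text=prompt, audio_target=waveform, sampling_rate=16_000)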
| 318
| 1
|
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = None
__UpperCamelCase = None
def _A (__a ) -> bool:
"""simple docstring"""
def is_valid_tree(__a ) -> bool:
if node is None:
return True
if not isinstance(__a , __a ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(__a ):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''' )
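    # The BST property is verified by propagating (lower, upper) bounds down the
    # tree: each node must lie strictly between the bounds inherited from its
    # ancestors, tightened to (bound, node.data) on the left subtree and
    # (node.data, bound) on the right subtree.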
def is_binary_search_tree_recursive_check(
__a , __a , __a ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , __a , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , __a )
)
return is_binary_search_tree_recursive_check(__a , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
|
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def _A (__a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.inf
def set_batch_size(__a ) -> None:
nonlocal batch_size
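        # Cap the Parquet row group size for media-heavy feature types so that
        # reading back a single example never decodes a huge row group; when
        # several caps apply, the smallest one wins via min().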
if isinstance(__a , __a ):
SCREAMING_SNAKE_CASE_ : Tuple = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__a , __a ):
SCREAMING_SNAKE_CASE_ : int = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__a , __a ) and feature.dtype == "binary":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__a , __a )
return None if batch_size is np.inf else batch_size
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Optional[int] , ):
'''simple docstring'''
super().__init__(
lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , )
SCREAMING_SNAKE_CASE_ : Any = path_or_paths if isinstance(lowercase_ , lowercase_) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE_ : Any = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Parquet(
cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , hash=lowercase_ , **lowercase_ , )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE_ : str = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : Dict = None
self.builder.download_and_prepare(
download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE_ : Any = self.builder.as_dataset(
split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory)
return dataset
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : Dataset , lowercase_ : Union[PathLike, BinaryIO] , lowercase_ : Optional[int] = None , **lowercase_ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = dataset
SCREAMING_SNAKE_CASE_ : Dict = path_or_buf
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size or get_writer_batch_size(dataset.features)
SCREAMING_SNAKE_CASE_ : Any = parquet_writer_kwargs
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
with open(self.path_or_buf , '''wb+''') as buffer:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._write(file_obj=lowercase_ , batch_size=lowercase_ , **self.parquet_writer_kwargs)
else:
SCREAMING_SNAKE_CASE_ : str = self._write(file_obj=self.path_or_buf , batch_size=lowercase_ , **self.parquet_writer_kwargs)
return written
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : BinaryIO , lowercase_ : int , **lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = parquet_writer_kwargs.pop('''path_or_buf''' , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.dataset.features.arrow_schema
SCREAMING_SNAKE_CASE_ : Tuple = pq.ParquetWriter(lowercase_ , schema=lowercase_ , **lowercase_)
for offset in logging.tqdm(
range(0 , len(self.dataset) , lowercase_) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
SCREAMING_SNAKE_CASE_ : List[Any] = query_table(
table=self.dataset._data , key=slice(lowercase_ , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(lowercase_)
written += batch.nbytes
writer.close()
return written
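# Usage sketch (hedged: this mirrors the usual Parquet-writer pattern in
# `datasets`; the dataset contents and output path are made up for
# illustration):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"text": ["a", "b", "c"]})
#     n_bytes = lowerCAmelCase__(ds, "out.parquet", batch_size=2).write()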
| 318
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["image_processor", "tokenizer"]
__UpperCamelCase = "LayoutLMv2ImageProcessor"
__UpperCamelCase = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[int] , lowercase_ : Tuple=None , lowercase_ : str=None , **lowercase_ : List[str]):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase_ , )
SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('''feature_extractor''')
SCREAMING_SNAKE_CASE_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(lowercase_ , lowercase_)
def __call__( self : str , lowercase_ : List[str] , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ : Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ : Optional[Union[List[int], List[List[int]]]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : str , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''')
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''')
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''')
# first, apply the image processor
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processor(images=lowercase_ , return_tensors=lowercase_)
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase_ , lowercase_):
SCREAMING_SNAKE_CASE_ : List[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
SCREAMING_SNAKE_CASE_ : Any = features['''words''']
SCREAMING_SNAKE_CASE_ : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel values
SCREAMING_SNAKE_CASE_ : Union[str, Any] = features.pop('''pixel_values''')
if return_overflowing_tokens is True:
SCREAMING_SNAKE_CASE_ : int = self.get_overflowing_images(lowercase_ , encoded_inputs['''overflow_to_sample_mapping'''])
SCREAMING_SNAKE_CASE_ : Optional[int] = images
return encoded_inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(lowercase_) != len(lowercase_):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F' {len(lowercase_)} and {len(lowercase_)}')
return images_with_overflow
def _SCREAMING_SNAKE_CASE ( self : Tuple , *lowercase_ : List[str] , **lowercase_ : List[Any]):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : str):
'''simple docstring'''
return self.tokenizer.decode(*lowercase_ , **lowercase_)
@property
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase_ , )
return self.image_processor_class
@property
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase_ , )
return self.image_processor
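# Usage sketch (illustrative; the file name is made up, and running with
# apply_ocr=True additionally requires the Tesseract binary):
#
#     from PIL import Image
#     encoding = processor(images=Image.open("form.png"), return_tensors="pt")
#     # -> input_ids, bbox, attention_mask and image tensors, with the words
#     #    and normalized boxes coming from OCR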
| 318
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = ["""model.decoder.embed_positions.weights"""]
def _A (__a ) -> Dict:
"""simple docstring"""
if "emb" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
SCREAMING_SNAKE_CASE_ : Any = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
SCREAMING_SNAKE_CASE_ : Dict = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def _A (__a , __a ) -> Tuple[Dict, Dict]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(state_dict.keys() )
SCREAMING_SNAKE_CASE_ : int = {}
for key in keys:
SCREAMING_SNAKE_CASE_ : int = state_dict.pop(__a )
SCREAMING_SNAKE_CASE_ : int = rename_keys(__a )
if "in_proj_weight" in key:
# split fused qkv proj
SCREAMING_SNAKE_CASE_ : List[str] = val[:hidden_size, :]
SCREAMING_SNAKE_CASE_ : List[str] = val[hidden_size : 2 * hidden_size, :]
SCREAMING_SNAKE_CASE_ : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
SCREAMING_SNAKE_CASE_ : int = val
else:
SCREAMING_SNAKE_CASE_ : Any = val
return state_dict, enc_dec_proj_state_dict
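# Shape sketch for the split above: with hidden_size=1024 (the "small"
# checkpoint), each fused `in_proj_weight` has shape (3 * 1024, 1024) and is
# sliced into three (1024, 1024) query/key/value projection matrices.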
def _A (__a ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
SCREAMING_SNAKE_CASE_ : Optional[int] = 10_24
SCREAMING_SNAKE_CASE_ : Tuple = 24
SCREAMING_SNAKE_CASE_ : Optional[Any] = 16
elif checkpoint == "medium":
SCREAMING_SNAKE_CASE_ : List[str] = 15_36
SCREAMING_SNAKE_CASE_ : Optional[int] = 48
SCREAMING_SNAKE_CASE_ : Optional[int] = 24
elif checkpoint == "large":
SCREAMING_SNAKE_CASE_ : Optional[Any] = 20_48
SCREAMING_SNAKE_CASE_ : Optional[int] = 48
SCREAMING_SNAKE_CASE_ : int = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
SCREAMING_SNAKE_CASE_ : List[Any] = MusicgenDecoderConfig(
hidden_size=__a , ffn_dim=hidden_size * 4 , num_hidden_layers=__a , num_attention_heads=__a , )
return config
@torch.no_grad()
def _A (__a , __a=None , __a=None , __a="cpu" ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = MusicGen.get_pretrained(__a , device=__a )
SCREAMING_SNAKE_CASE_ : Dict = decoder_config_from_checkpoint(__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = fairseq_model.lm.state_dict()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = rename_state_dict(
__a , hidden_size=decoder_config.hidden_size )
    SCREAMING_SNAKE_CASE_ : Optional[Any] = T5EncoderModel.from_pretrained('''t5-base''' )
SCREAMING_SNAKE_CASE_ : List[str] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
SCREAMING_SNAKE_CASE_ : int = MusicgenForCausalLM(__a ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = decoder.load_state_dict(__a , strict=__a )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__a )
if len(__a ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(__a ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
SCREAMING_SNAKE_CASE_ : str = MusicgenForConditionalGeneration(text_encoder=__a , audio_encoder=__a , decoder=__a )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__a )
# check we can do a forward pass
SCREAMING_SNAKE_CASE_ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(input_ids=__a , decoder_input_ids=__a ).logits
if logits.shape != (8, 1, 20_48):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained('''t5-base''' )
SCREAMING_SNAKE_CASE_ : str = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
SCREAMING_SNAKE_CASE_ : Tuple = MusicgenProcessor(feature_extractor=__a , tokenizer=__a )
# set the appropriate bos/pad token ids
SCREAMING_SNAKE_CASE_ : str = 20_48
SCREAMING_SNAKE_CASE_ : List[Any] = 20_48
# set other default generation config params
SCREAMING_SNAKE_CASE_ : int = int(30 * audio_encoder.config.frame_rate )
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3.0
if pytorch_dump_folder is not None:
Path(__a ).mkdir(exist_ok=__a )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(__a )
processor.save_pretrained(__a )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(__a )
processor.push_to_hub(__a )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
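# Example invocation (hypothetical script name and output directory):
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small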
| 318
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 3
SCREAMING_SNAKE_CASE_ : str = (32, 32)
SCREAMING_SNAKE_CASE_ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(lowercase_)
return image
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
torch.manual_seed(0)
        SCREAMING_SNAKE_CASE_ : Optional[int] = UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowercase_ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Any = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE_ : Optional[int] = DDPMScheduler()
SCREAMING_SNAKE_CASE_ : str = DDIMScheduler(prediction_type='''v_prediction''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.dummy_vae
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
SCREAMING_SNAKE_CASE_ : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
        SCREAMING_SNAKE_CASE_ : str = Image.fromarray(np.uint8(lowercase_)).convert('''RGB''').resize((64, 64))
        # assemble the Stable Diffusion upscale pipeline from the dummy components
SCREAMING_SNAKE_CASE_ : List[str] = StableDiffusionUpscalePipeline(
unet=lowercase_ , low_res_scheduler=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , max_noise_level=350 , )
SCREAMING_SNAKE_CASE_ : Tuple = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Generator(device=lowercase_).manual_seed(0)
SCREAMING_SNAKE_CASE_ : Tuple = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Dict = output.images
SCREAMING_SNAKE_CASE_ : str = torch.Generator(device=lowercase_).manual_seed(0)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=lowercase_ , )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : int = image_from_tuple[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Optional[int] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE_ : Dict = DDPMScheduler()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DDIMScheduler(prediction_type='''v_prediction''')
SCREAMING_SNAKE_CASE_ : str = self.dummy_vae
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
        SCREAMING_SNAKE_CASE_ : List[Any] = Image.fromarray(np.uint8(lowercase_)).convert('''RGB''').resize((64, 64))
        # assemble the Stable Diffusion upscale pipeline from the dummy components
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionUpscalePipeline(
unet=lowercase_ , low_res_scheduler=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , max_noise_level=350 , )
SCREAMING_SNAKE_CASE_ : str = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE_ : str = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = output.images
assert image.shape[0] == 2
SCREAMING_SNAKE_CASE_ : Tuple = torch.Generator(device=lowercase_).manual_seed(0)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Tuple = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''')
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE_ : str = DDPMScheduler()
SCREAMING_SNAKE_CASE_ : Dict = DDIMScheduler(prediction_type='''v_prediction''')
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_vae
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
SCREAMING_SNAKE_CASE_ : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = Image.fromarray(np.uint8(lowercase_)).convert('''RGB''').resize((64, 64))
# put models in fp16, except vae as it overflows in fp16
SCREAMING_SNAKE_CASE_ : Dict = unet.half()
SCREAMING_SNAKE_CASE_ : Any = text_encoder.half()
        # assemble the Stable Diffusion upscale pipeline from the dummy components
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionUpscalePipeline(
unet=lowercase_ , low_res_scheduler=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , max_noise_level=350 , )
SCREAMING_SNAKE_CASE_ : str = sd_pipe.to(lowercase_)
sd_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Tuple = sd_pipe(
[prompt] , image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='''np''' , ).images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''')
SCREAMING_SNAKE_CASE_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''')
SCREAMING_SNAKE_CASE_ : Any = '''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE_ : Dict = StableDiffusionUpscalePipeline.from_pretrained(lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ : List[str] = '''a cat sitting on a park bench'''
SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Any = pipe(
prompt=lowercase_ , image=lowercase_ , generator=lowercase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''')
SCREAMING_SNAKE_CASE_ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''')
SCREAMING_SNAKE_CASE_ : Any = '''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE_ : Dict = StableDiffusionUpscalePipeline.from_pretrained(
            lowercase_ , torch_dtype=torch.float16 , )
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ : List[Any] = '''a cat sitting on a park bench'''
SCREAMING_SNAKE_CASE_ : Tuple = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(
prompt=lowercase_ , image=lowercase_ , generator=lowercase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''')
SCREAMING_SNAKE_CASE_ : str = '''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE_ : str = StableDiffusionUpscalePipeline.from_pretrained(
            lowercase_ , torch_dtype=torch.float16 , )
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''a cat sitting on a park bench'''
SCREAMING_SNAKE_CASE_ : Tuple = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : List[str] = pipe(
prompt=lowercase_ , image=lowercase_ , generator=lowercase_ , num_inference_steps=5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 318
|
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def _A (__a ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def _A (__a ) -> np.ndarray:
"""simple docstring"""
return (gray > 1_27) & (gray <= 2_55)
def _A (__a , __a ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros_like(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
SCREAMING_SNAKE_CASE_ : Any = int(summation > 0 )
return output
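# Worked example of the intended behaviour (a sketch, not a test): dilating a
# single foreground pixel with the 3x3 cross kernel below switches on its
# 4-neighbourhood as well.
#
#     img = np.zeros((5, 5))
#     img[2, 2] = 1
#     dilation(img, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))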
if __name__ == "__main__":
# read original image
UpperCAmelCase_ : Dict = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
UpperCAmelCase_ : List[Any] = np.array(Image.open(lena_path))
# kernel to be applied
UpperCAmelCase_ : Any = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
UpperCAmelCase_ : Tuple = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
UpperCAmelCase_ : List[str] = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
| 318
| 1
|
"""simple docstring"""
from collections import defaultdict
def _A (__a , __a ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = first_str.lower().strip()
SCREAMING_SNAKE_CASE_ : List[Any] = second_str.lower().strip()
# Remove whitespace
SCREAMING_SNAKE_CASE_ : Dict = first_str.replace(''' ''' , '''''' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(__a ) != len(__a ):
return False
# Default values for count should be 0
SCREAMING_SNAKE_CASE_ : defaultdict[str, int] = defaultdict(__a )
    # For each character position, increment the count for the first string's
    # character and decrement it for the second's; anagrams net out to zero.
for i in range(len(__a ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
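# Worked example: "listen" vs "silent" drives every letter's net count back to
# zero (anagrams), while "aab" vs "abb" leaves count["a"] == 1 and
# count["b"] == -1 (not anagrams).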
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ : Any = input("""Enter the first string """).strip()
UpperCAmelCase_ : Optional[int] = input("""Enter the second string """).strip()
UpperCAmelCase_ : Union[str, Any] = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 318
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def _A (__a ) -> YolosConfig:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
SCREAMING_SNAKE_CASE_ : List[Any] = 1_92
SCREAMING_SNAKE_CASE_ : Any = 7_68
SCREAMING_SNAKE_CASE_ : Tuple = 12
SCREAMING_SNAKE_CASE_ : List[str] = 3
SCREAMING_SNAKE_CASE_ : List[str] = [8_00, 13_33]
SCREAMING_SNAKE_CASE_ : List[Any] = False
elif yolos_name == "yolos_s_dWr":
SCREAMING_SNAKE_CASE_ : int = 3_30
SCREAMING_SNAKE_CASE_ : Optional[int] = 14
SCREAMING_SNAKE_CASE_ : int = 6
SCREAMING_SNAKE_CASE_ : str = 13_20
elif "yolos_s" in yolos_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3_84
SCREAMING_SNAKE_CASE_ : List[Any] = 15_36
SCREAMING_SNAKE_CASE_ : Dict = 12
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 6
elif "yolos_b" in yolos_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [8_00, 13_44]
SCREAMING_SNAKE_CASE_ : List[Any] = 91
SCREAMING_SNAKE_CASE_ : str = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE_ : List[Any] = '''coco-detection-id2label.json'''
SCREAMING_SNAKE_CASE_ : Any = json.load(open(hf_hub_download(__a , __a , repo_type='''dataset''' ) , '''r''' ) )
    SCREAMING_SNAKE_CASE_ : List[Any] = {int(k): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ : Dict = idalabel
SCREAMING_SNAKE_CASE_ : Dict = {v: k for k, v in idalabel.items()}
return config
def _A (__a , __a , __a = False ) -> List[str]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
SCREAMING_SNAKE_CASE_ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ : List[str] = in_proj_weight[: config.hidden_size, :]
SCREAMING_SNAKE_CASE_ : str = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE_ : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE_ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE_ : Any = in_proj_weight[-config.hidden_size :, :]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def _A (__a ) -> str:
"""simple docstring"""
if "backbone" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
SCREAMING_SNAKE_CASE_ : int = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_ : Dict = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_ : List[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
SCREAMING_SNAKE_CASE_ : int = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_ : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
SCREAMING_SNAKE_CASE_ : int = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
SCREAMING_SNAKE_CASE_ : Any = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def _A (__a , __a ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_ : Dict = orig_state_dict.pop(__a )
if "qkv" in key:
SCREAMING_SNAKE_CASE_ : List[Any] = key.split('''.''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = int(key_split[2] )
SCREAMING_SNAKE_CASE_ : str = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE_ : Optional[Any] = val[:dim, :]
SCREAMING_SNAKE_CASE_ : List[str] = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE_ : int = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE_ : List[str] = val[:dim]
SCREAMING_SNAKE_CASE_ : Any = val[dim : dim * 2]
SCREAMING_SNAKE_CASE_ : Dict = val[-dim:]
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val
return orig_state_dict
def _A () -> torch.Tensor:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE_ : Tuple = Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def _A (__a , __a , __a , __a = False ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = get_yolos_config(__a )
# load original state_dict
SCREAMING_SNAKE_CASE_ : Tuple = torch.load(__a , map_location='''cpu''' )['''model''']
# load 🤗 model
SCREAMING_SNAKE_CASE_ : int = YolosForObjectDetection(__a )
model.eval()
SCREAMING_SNAKE_CASE_ : int = convert_state_dict(__a , __a )
model.load_state_dict(__a )
# Check outputs on an image, prepared by YolosImageProcessor
SCREAMING_SNAKE_CASE_ : List[str] = 8_00 if yolos_name != '''yolos_ti''' else 5_12
SCREAMING_SNAKE_CASE_ : Tuple = YolosImageProcessor(format='''coco_detection''' , size=__a )
SCREAMING_SNAKE_CASE_ : Tuple = image_processor(images=prepare_img() , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = model(**__a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = outputs.logits, outputs.pred_boxes
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = None, None
if yolos_name == "yolos_ti":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(f'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __a , atol=1e-4 )
Path(__a ).mkdir(exist_ok=__a )
print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__a )
if push_to_hub:
SCREAMING_SNAKE_CASE_ : List[str] = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
SCREAMING_SNAKE_CASE_ : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(__a , organization='''hustvl''' )
model.push_to_hub(__a , organization='''hustvl''' )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
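# Example invocation (hypothetical local checkpoint path):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small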
| 318
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase_ : Optional[Any] = """docs/source/en/_toctree.yml"""
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
SCREAMING_SNAKE_CASE_ : List[Any] = [key for key, value in counts.items() if value > 1]
SCREAMING_SNAKE_CASE_ : int = []
for duplicate_key in duplicates:
SCREAMING_SNAKE_CASE_ : List[str] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add the non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
    return sorted(__a , key=lambda s : s["title"].lower() )
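# e.g. two entries {"local": "model_doc/bert", "title": "BERT"} collapse into
# one, whereas the same "local" under two different titles raises the
# ValueError above; the surviving entries are returned sorted by title.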
def _A (__a=False ) -> Tuple:
"""simple docstring"""
with open(__a , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE_ : Dict = yaml.safe_load(f.read() )
# Get to the API doc
SCREAMING_SNAKE_CASE_ : Any = 0
while content[api_idx]["title"] != "API":
api_idx += 1
SCREAMING_SNAKE_CASE_ : str = content[api_idx]['''sections''']
# Then to the model doc
SCREAMING_SNAKE_CASE_ : List[Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
SCREAMING_SNAKE_CASE_ : Optional[int] = api_doc[model_idx]['''sections''']
SCREAMING_SNAKE_CASE_ : str = [(idx, section) for idx, section in enumerate(__a ) if '''sections''' in section]
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
for idx, modality_doc in modalities_docs:
SCREAMING_SNAKE_CASE_ : List[str] = modality_doc['''sections''']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
SCREAMING_SNAKE_CASE_ : str = True
if overwrite:
SCREAMING_SNAKE_CASE_ : Optional[int] = new_modality_doc
if diff:
if overwrite:
SCREAMING_SNAKE_CASE_ : List[Any] = model_doc
SCREAMING_SNAKE_CASE_ : int = api_doc
with open(__a , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase_ : Tuple = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 318
| 1
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
UpperCAmelCase_ : Optional[Any] = ["""bert-base-uncased""", """bert-base-cased"""]
UpperCAmelCase_ : Union[str, Any] = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowerCAmelCase__ ( tf.keras.Model ):
'''simple docstring'''
def __init__( self : Dict , lowercase_ : int):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoConfig.from_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = TFAutoModel.from_config(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.tokenizer(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.bert(**lowercase_)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE_ : Optional[int] = [
BertTokenizer.from_pretrained(lowercase_) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
SCREAMING_SNAKE_CASE_ : Dict = [TFBertTokenizer.from_pretrained(lowercase_) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(lowercase_ , use_fast_bert_tokenizer=lowercase_)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
SCREAMING_SNAKE_CASE_ : List[str] = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
SCREAMING_SNAKE_CASE_ : List[str] = list(zip(self.test_sentences , self.test_sentences[::-1]))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
SCREAMING_SNAKE_CASE_ : int = tokenizer(lowercase_ , return_tensors='''tf''' , padding='''longest''')
SCREAMING_SNAKE_CASE_ : int = tf_tokenizer(lowercase_)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key]))
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE_ : int = tf_tokenizer(self.paired_sentences)
SCREAMING_SNAKE_CASE_ : int = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key]))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.function(lowercase_)
for test_inputs in (self.test_sentences, self.paired_sentences):
SCREAMING_SNAKE_CASE_ : Any = tf.constant(lowercase_)
SCREAMING_SNAKE_CASE_ : int = compiled_tokenizer(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tf_tokenizer(lowercase_)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE_ : List[str] = ModelToSave(tokenizer=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.convert_to_tensor(self.test_sentences)
SCREAMING_SNAKE_CASE_ : int = model(lowercase_) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE_ : Any = Path(lowercase_) / '''saved.model'''
model.save(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = tf.keras.models.load_model(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = loaded_model(lowercase_)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
| 318
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 318
| 1
|
"""simple docstring"""
UpperCAmelCase_ : List[Any] = 9.8_0_6_6_5
def _A (__a , __a , __a = g ) -> float:
"""simple docstring"""
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
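# Illustrative usage (values assumed for this sketch): the buoyant force on an
# object of volume 0.5 m^3 fully submerged in water (~997 kg/m^3) under the
# standard gravity defined above is roughly 997 * 9.80665 * 0.5 ≈ 4888.6 N.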
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 318
|
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase_ : List[str] = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _A (__a , __a , __a , __a ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
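# (row - row % 3, column - column % 3) below is the top-left cell of the 3x3
    # box containing (row, column); all nine cells of that box are checked for n.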
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _A (__a ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _A (__a ) -> Matrix | None:
"""simple docstring"""
if location := find_empty_location(__a ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__a , __a , __a , __a ):
SCREAMING_SNAKE_CASE_ : Tuple = digit
if sudoku(__a ) is not None:
return grid
SCREAMING_SNAKE_CASE_ : Any = 0
return None
def _A (__a ) -> None:
"""simple docstring"""
for row in grid:
for cell in row:
print(__a , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
UpperCAmelCase_ : str = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 318
| 1
|
"""simple docstring"""
import re
import string
import numpy as np
import datasets
UpperCAmelCase_ : Union[str, Any] = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
UpperCAmelCase_ : Optional[int] = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
UpperCAmelCase_ : Tuple = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , reference_urls=[] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : List[str]=None , lowercase_ : int=False , lowercase_ : List[str]=False , lowercase_ : Optional[int]=False , ):
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([re.sub(lowercase_ , '''''' , lowercase_) for x in predictions])
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array([re.sub(lowercase_ , '''''' , lowercase_) for x in references])
else:
SCREAMING_SNAKE_CASE_ : List[Any] = np.asarray(lowercase_)
SCREAMING_SNAKE_CASE_ : str = np.asarray(lowercase_)
if ignore_case:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.char.lower(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = np.char.lower(lowercase_)
if ignore_punctuation:
SCREAMING_SNAKE_CASE_ : int = string.punctuation.maketrans('''''' , '''''' , string.punctuation)
SCREAMING_SNAKE_CASE_ : List[str] = np.char.translate(lowercase_ , table=lowercase_)
SCREAMING_SNAKE_CASE_ : int = np.char.translate(lowercase_ , table=lowercase_)
if ignore_numbers:
SCREAMING_SNAKE_CASE_ : List[str] = string.digits.maketrans('''''' , '''''' , string.digits)
SCREAMING_SNAKE_CASE_ : List[Any] = np.char.translate(lowercase_ , table=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = np.char.translate(lowercase_ , table=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = predictions == references
return {"exact_match": np.mean(lowercase_) * 100}
| 318
|
"""simple docstring"""
from itertools import permutations
def _A (__a ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
SCREAMING_SNAKE_CASE_ : List[str] = [7, 11, 13, 17]
for i, test in enumerate(__a ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
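# Illustrative check (a known Project Euler 43 solution, stated for reference):
# the digit tuple of 1406357289 passes every test above, e.g. 357 % 7 == 0,
# 572 % 11 == 0, 728 % 13 == 0 and 289 % 17 == 0.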
def _A (__a = 10 ) -> int:
"""simple docstring"""
return sum(
int(''''''.join(map(__a , __a ) ) )
for num in permutations(range(__a ) )
if is_substring_divisible(__a ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 318
| 1
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
logging.set_verbosity_info()
def _A (__a , __a ) -> Dict:
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
SCREAMING_SNAKE_CASE_ : Optional[int] = XLMProphetNetForConditionalGenerationOld.from_pretrained(__a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = XLMProphetNetForConditionalGeneration.from_pretrained(
__a , output_loading_info=__a )
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(__a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = ProphetNetForConditionalGeneration.from_pretrained(
__a , output_loading_info=__a )
SCREAMING_SNAKE_CASE_ : List[Any] = ['''key_proj''', '''value_proj''', '''query_proj''']
SCREAMING_SNAKE_CASE_ : List[str] = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = key.split('''.''' )
if attributes[0] == "lm_head":
SCREAMING_SNAKE_CASE_ : str = prophet
SCREAMING_SNAKE_CASE_ : Dict = prophet_old
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prophet.prophetnet
SCREAMING_SNAKE_CASE_ : Tuple = prophet_old.model
SCREAMING_SNAKE_CASE_ : List[Any] = False
for attribute in attributes:
if attribute in mapping:
SCREAMING_SNAKE_CASE_ : List[str] = mapping[attribute]
if not hasattr(__a , __a ) and len(__a ) > 0:
SCREAMING_SNAKE_CASE_ : str = attribute
elif hasattr(__a , __a ):
SCREAMING_SNAKE_CASE_ : Tuple = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
SCREAMING_SNAKE_CASE_ : int = old_model.weight
logger.info(f'{attribute} is initialized.' )
SCREAMING_SNAKE_CASE_ : Dict = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = old_model.bias
logger.info(f'{attribute} is initialized' )
SCREAMING_SNAKE_CASE_ : Optional[int] = True
break
elif attribute in special_keys and hasattr(__a , '''in_proj_weight''' ):
SCREAMING_SNAKE_CASE_ : Dict = old_model.in_proj_weight.shape[0] // 3
SCREAMING_SNAKE_CASE_ : List[Any] = getattr(__a , __a )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
SCREAMING_SNAKE_CASE_ : Dict = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
SCREAMING_SNAKE_CASE_ : str = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
SCREAMING_SNAKE_CASE_ : str = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
SCREAMING_SNAKE_CASE_ : Dict = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
SCREAMING_SNAKE_CASE_ : Dict = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
SCREAMING_SNAKE_CASE_ : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
SCREAMING_SNAKE_CASE_ : int = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
SCREAMING_SNAKE_CASE_ : List[str] = True
break
if attribute.isdigit():
SCREAMING_SNAKE_CASE_ : str = model[int(__a )]
SCREAMING_SNAKE_CASE_ : Optional[int] = old_model[int(__a )]
else:
SCREAMING_SNAKE_CASE_ : Tuple = getattr(__a , __a )
if old_attribute == "":
SCREAMING_SNAKE_CASE_ : Tuple = old_model
else:
if not hasattr(__a , __a ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
SCREAMING_SNAKE_CASE_ : List[str] = getattr(__a , __a )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__a )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 318
|
"""simple docstring"""
UpperCAmelCase_ : List[Any] = 9.8_0_6_6_5
def _A (__a , __a , __a = g ) -> float:
"""simple docstring"""
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 318
| 1
|
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = RobertaConfig
__UpperCamelCase = "roberta"
def __init__( self : List[str] , lowercase_ : Tuple):
'''simple docstring'''
super().__init__(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = RobertaEmbeddings(lowercase_)
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = RobertaConfig
__UpperCamelCase = "roberta"
def __init__( self : Any , lowercase_ : List[str]):
'''simple docstring'''
super().__init__(lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = config.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = config.num_hidden_layers
SCREAMING_SNAKE_CASE_ : int = DeeRobertaModel(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = nn.Dropout(config.hidden_dropout_prob)
SCREAMING_SNAKE_CASE_ : Tuple = nn.Linear(config.hidden_size , self.config.num_labels)
@add_start_docstrings_to_model_forward(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Tuple=None , lowercase_ : Dict=None , lowercase_ : Optional[int]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=-1 , lowercase_ : str=False , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self.num_layers
try:
SCREAMING_SNAKE_CASE_ : Dict = self.roberta(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , position_ids=lowercase_ , head_mask=lowercase_ , inputs_embeds=lowercase_ , )
SCREAMING_SNAKE_CASE_ : Dict = outputs[1]
SCREAMING_SNAKE_CASE_ : List[str] = self.dropout(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.classifier(lowercase_)
SCREAMING_SNAKE_CASE_ : int = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
SCREAMING_SNAKE_CASE_ : List[Any] = e.message
SCREAMING_SNAKE_CASE_ : List[Any] = e.exit_layer
SCREAMING_SNAKE_CASE_ : List[str] = outputs[0]
if not self.training:
SCREAMING_SNAKE_CASE_ : Dict = entropy(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
SCREAMING_SNAKE_CASE_ : Dict = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE_ : Optional[Any] = MSELoss()
SCREAMING_SNAKE_CASE_ : Optional[int] = loss_fct(logits.view(-1) , labels.view(-1))
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
# work with highway exits
SCREAMING_SNAKE_CASE_ : Tuple = []
for highway_exit in outputs[-1]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(lowercase_)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE_ : Optional[Any] = MSELoss()
SCREAMING_SNAKE_CASE_ : List[Any] = loss_fct(highway_logits.view(-1) , labels.view(-1))
else:
SCREAMING_SNAKE_CASE_ : str = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_ : Any = loss_fct(highway_logits.view(-1 , self.num_labels) , labels.view(-1))
highway_losses.append(lowercase_)
if train_highway:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
SCREAMING_SNAKE_CASE_ : Dict = (loss,) + outputs
if not self.training:
SCREAMING_SNAKE_CASE_ : Tuple = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
SCREAMING_SNAKE_CASE_ : int = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 318
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase_ : Union[str, Any] = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__a )
def _A (__a ) -> Any:
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE_ : Optional[Any] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__a , id=__a )
| 318
| 1
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["input_features", "is_longer"]
def __init__( self : List[str] , lowercase_ : List[str]=64 , lowercase_ : Any=48000 , lowercase_ : int=480 , lowercase_ : Any=10 , lowercase_ : Tuple=1024 , lowercase_ : int=0.0 , lowercase_ : Optional[int]=False , lowercase_ : float = 0 , lowercase_ : float = 14000 , lowercase_ : int = None , lowercase_ : str = "fusion" , lowercase_ : str = "repeatpad" , **lowercase_ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
feature_size=lowercase_ , sampling_rate=lowercase_ , padding_value=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE_ : str = truncation
SCREAMING_SNAKE_CASE_ : Optional[Any] = padding
SCREAMING_SNAKE_CASE_ : Any = fft_window_size
SCREAMING_SNAKE_CASE_ : Any = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE_ : Tuple = max_length_s
SCREAMING_SNAKE_CASE_ : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE_ : List[str] = sampling_rate
SCREAMING_SNAKE_CASE_ : Union[str, Any] = frequency_min
SCREAMING_SNAKE_CASE_ : List[str] = frequency_max
SCREAMING_SNAKE_CASE_ : Optional[int] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowercase_ , min_frequency=lowercase_ , max_frequency=lowercase_ , sampling_rate=lowercase_ , norm=lowercase_ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowercase_ , min_frequency=lowercase_ , max_frequency=lowercase_ , sampling_rate=lowercase_ , norm='''slaney''' , mel_scale='''slaney''' , )
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = copy.deepcopy(self.__dict__)
SCREAMING_SNAKE_CASE_ : List[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : np.array , lowercase_ : Optional[np.array] = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = spectrogram(
lowercase_ , window_function(self.fft_window_size , '''hann''') , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowercase_ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3)
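# The valid chunk start indices are split into three roughly equal ranges
        # (front / middle / back of the spectrogram) so one chunk is sampled per region.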
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE_ : str = [0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE_ : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE_ : Optional[int] = np.random.choice(ranges[0])
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.choice(ranges[1])
SCREAMING_SNAKE_CASE_ : str = np.random.choice(ranges[2])
SCREAMING_SNAKE_CASE_ : Union[str, Any] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE_ : Tuple = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE_ : str = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(mel[None, None, :])
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.nn.functional.interpolate(
lowercase_ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE_ : Any = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0)
return mel_fusion
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : np.array , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Any):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE_ : Optional[int] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE_ : Tuple = len(lowercase_) - max_length
SCREAMING_SNAKE_CASE_ : str = np.random.randint(0 , overflow + 1)
SCREAMING_SNAKE_CASE_ : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._np_extract_fbank_features(lowercase_ , self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE_ : Optional[int] = self._np_extract_fbank_features(lowercase_ , self.mel_filters)
SCREAMING_SNAKE_CASE_ : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE_ : Any = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE_ : List[Any] = np.stack([mel, mel, mel, mel] , axis=0)
SCREAMING_SNAKE_CASE_ : List[str] = False
else:
SCREAMING_SNAKE_CASE_ : Dict = self._random_mel_fusion(lowercase_ , lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = True
else:
raise NotImplementedError(F'data_truncating {truncation} not implemented')
else:
SCREAMING_SNAKE_CASE_ : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(max_length / len(lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.stack(np.tile(lowercase_ , n_repeat + 1))[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE_ : Tuple = int(max_length / len(lowercase_))
SCREAMING_SNAKE_CASE_ : Dict = np.stack(np.tile(lowercase_ , lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.pad(lowercase_ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0)
if truncation == "fusion":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._np_extract_fbank_features(lowercase_ , self.mel_filters)
SCREAMING_SNAKE_CASE_ : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0)
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = self._np_extract_fbank_features(lowercase_ , self.mel_filters_slaney)[None, :]
return input_mel, longer
def __call__( self : List[str] , lowercase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase_ : str = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE_ : str = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''')
SCREAMING_SNAKE_CASE_ : Tuple = isinstance(lowercase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}')
SCREAMING_SNAKE_CASE_ : Tuple = is_batched_numpy or (
isinstance(lowercase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
SCREAMING_SNAKE_CASE_ : int = [np.asarray(lowercase_ , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(lowercase_ , np.ndarray):
SCREAMING_SNAKE_CASE_ : Tuple = np.asarray(lowercase_ , dtype=np.floataa)
elif isinstance(lowercase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
SCREAMING_SNAKE_CASE_ : Optional[Any] = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [np.asarray(lowercase_)]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE_ : List[str] = [
self._get_input_mel(lowercase_ , max_length if max_length else self.nb_max_samples , lowercase_ , lowercase_)
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
for mel, longer in padded_inputs:
input_mel.append(lowercase_)
is_longer.append(lowercase_)
if truncation == "fusion" and sum(lowercase_) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE_ : str = np.random.randint(0 , len(lowercase_))
SCREAMING_SNAKE_CASE_ : int = True
if isinstance(input_mel[0] , lowercase_):
SCREAMING_SNAKE_CASE_ : int = [np.asarray(lowercase_ , dtype=np.floataa) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE_ : Tuple = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE_ : str = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE_ : Dict = BatchFeature(lowercase_)
if return_tensors is not None:
SCREAMING_SNAKE_CASE_ : Dict = input_features.convert_to_tensors(lowercase_)
return input_features
| 318
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
UpperCAmelCase_ : Any = """examples/"""
UpperCAmelCase_ : Optional[int] = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCAmelCase_ : List[Any] = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCAmelCase_ : Optional[int] = """README.md"""
def _A (__a , __a , __a ) -> int:
"""simple docstring"""
with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE_ : Optional[int] = replace.replace('''VERSION''' , __a )
SCREAMING_SNAKE_CASE_ : Tuple = re_pattern.sub(__a , __a )
with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__a )
def _A (__a ) -> int:
"""simple docstring"""
for folder, directories, fnames in os.walk(__a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__a , __a ) , __a , pattern='''examples''' )
def _A (__a , __a=False ) -> List[str]:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__a , __a , __a )
if not patch:
update_version_in_examples(__a )
def _A () -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''🤗 Transformers currently provides the following architectures'''
SCREAMING_SNAKE_CASE_ : Optional[int] = '''1. Want to contribute a new model?'''
with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ : Tuple = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE_ : Tuple = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ : Dict = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
SCREAMING_SNAKE_CASE_ : List[Any] = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__a )
def _A () -> List[str]:
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.read()
SCREAMING_SNAKE_CASE_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def _A (__a=False ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE_ : List[Any] = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE_ : int = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
SCREAMING_SNAKE_CASE_ : Any = f'{default_version.major}.{default_version.minor + 1}.0'
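    # Illustrative outcomes: a dev version "4.26.0.dev0" becomes "4.26.0", a patch
    # bumps "4.25.1" to "4.25.2", and a regular release bumps "4.25.3" to "4.26.0".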
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE_ : int = input(f'Which version are you releasing? [{default_version}]' )
if len(__a ) == 0:
SCREAMING_SNAKE_CASE_ : Optional[Any] = default_version
print(f'Updating version to {version}.' )
global_version_update(__a , patch=__a )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def _A () -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = get_version()
SCREAMING_SNAKE_CASE_ : Any = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE_ : int = input(f'Which version are we developing now? [{dev_version}]' )
if len(__a ) == 0:
SCREAMING_SNAKE_CASE_ : Optional[int] = dev_version
print(f'Updating version to {version}.' )
global_version_update(__a )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
UpperCAmelCase_ : int = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 318
| 1
|
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = 1
@register_to_config
def __init__( self : Union[str, Any] , lowercase_ : int = 1000 , lowercase_ : Optional[Union[np.ndarray, List[float]]] = None):
'''simple docstring'''
self.set_timesteps(lowercase_)
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE_ : int = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE_ : List[str] = 4
# running values
SCREAMING_SNAKE_CASE_ : List[str] = []
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : int , lowercase_ : Union[str, torch.device] = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = num_inference_steps
SCREAMING_SNAKE_CASE_ : int = torch.linspace(1 , 0 , num_inference_steps + 1)[:-1]
SCREAMING_SNAKE_CASE_ : Dict = torch.cat([steps, torch.tensor([0.0])])
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(self.config.trained_betas , dtype=torch.floataa)
else:
SCREAMING_SNAKE_CASE_ : List[Any] = torch.sin(steps * math.pi / 2) ** 2
SCREAMING_SNAKE_CASE_ : List[str] = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE_ : Optional[Any] = (torch.atana(self.betas , self.alphas) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE_ : List[Any] = timesteps.to(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = []
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : torch.FloatTensor , lowercase_ : int , lowercase_ : torch.FloatTensor , lowercase_ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''')
SCREAMING_SNAKE_CASE_ : List[str] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE_ : Dict = timestep_index + 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(lowercase_)
if len(self.ets) == 1:
SCREAMING_SNAKE_CASE_ : List[Any] = self.ets[-1]
elif len(self.ets) == 2:
SCREAMING_SNAKE_CASE_ : Any = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets) == 3:
SCREAMING_SNAKE_CASE_ : List[Any] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
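# The four branches above apply the 1st- to 4th-order Adams-Bashforth
        # multistep coefficients (1), (3, -1)/2, (23, -16, 5)/12 and
        # (55, -59, 37, -9)/24 to the stored model outputs in self.ets.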
SCREAMING_SNAKE_CASE_ : Any = self._get_prev_sample(lowercase_ , lowercase_ , lowercase_ , lowercase_)
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : torch.FloatTensor , *lowercase_ : List[Any] , **lowercase_ : Any):
'''simple docstring'''
return sample
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE_ : Tuple = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE_ : Optional[int] = (sample - sigma * ets) / max(lowercase_ , 1e-8)
SCREAMING_SNAKE_CASE_ : Dict = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : str):
'''simple docstring'''
return self.config.num_train_timesteps
| 318
|
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def _A (__a , __a , __a=1e-12 ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T
return jnp.matmul(__a , norm_emb_a.T )
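# The helper above L2-normalises each row of both embedding batches (the eps
# argument guards against zero norms) and returns their cosine-similarity matrix.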
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = jnp.floataa
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxCLIPVisionModule(self.config.vision_config)
SCREAMING_SNAKE_CASE_ : Tuple = nn.Dense(self.config.projection_dim , use_bias=lowercase_ , dtype=self.dtype)
SCREAMING_SNAKE_CASE_ : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim))
SCREAMING_SNAKE_CASE_ : Dict = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,))
def __call__( self : Optional[Any] , lowercase_ : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.vision_model(lowercase_)[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.visual_projection(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.special_care_embeds)
SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.concept_embeds)
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
SCREAMING_SNAKE_CASE_ : Tuple = 0.0
SCREAMING_SNAKE_CASE_ : Dict = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.round(lowercase_ , 3)
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=lowercase_)
# Use a lower threshold if an image has any special care concept
SCREAMING_SNAKE_CASE_ : Dict = is_special_care * 0.01
SCREAMING_SNAKE_CASE_ : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
SCREAMING_SNAKE_CASE_ : Any = jnp.round(lowercase_ , 3)
SCREAMING_SNAKE_CASE_ : Dict = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = CLIPConfig
__UpperCamelCase = "clip_input"
__UpperCamelCase = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Union[str, Any] , lowercase_ : CLIPConfig , lowercase_ : Optional[Tuple] = None , lowercase_ : int = 0 , lowercase_ : jnp.dtype = jnp.floataa , lowercase_ : bool = True , **lowercase_ : Any , ):
'''simple docstring'''
if input_shape is None:
SCREAMING_SNAKE_CASE_ : List[str] = (1, 224, 224, 3)
SCREAMING_SNAKE_CASE_ : List[Any] = self.module_class(config=lowercase_ , dtype=lowercase_ , **lowercase_)
super().__init__(lowercase_ , lowercase_ , input_shape=lowercase_ , seed=lowercase_ , dtype=lowercase_ , _do_init=_do_init)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : jax.random.KeyArray , lowercase_ : Tuple , lowercase_ : FrozenDict = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = jax.random.normal(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.split(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = {'''params''': params_rng, '''dropout''': dropout_rng}
SCREAMING_SNAKE_CASE_ : List[Any] = self.module.init(lowercase_ , lowercase_)['''params''']
return random_params
def __call__( self : List[Any] , lowercase_ : List[str] , lowercase_ : dict = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = jnp.transpose(lowercase_ , (0, 2, 3, 1))
return self.module.apply(
{'''params''': params or self.params} , jnp.array(lowercase_ , dtype=jnp.floataa) , rngs={} , )
| 318
| 1
|
"""simple docstring"""
def _A (__a ) -> list:
"""simple docstring"""
if len(__a ) <= 1:
return lst
SCREAMING_SNAKE_CASE_ : Any = 1
while i < len(__a ):
if lst[i - 1] <= lst[i]:
i += 1
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = lst[i], lst[i - 1]
i -= 1
if i == 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1
return lst
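# Illustrative run: gnome_sort([34, 2, 10, -9]) walks the list, swapping
# out-of-order neighbours and stepping back after each swap, and returns
# [-9, 2, 10, 34].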
if __name__ == "__main__":
UpperCAmelCase_ : int = input("""Enter numbers separated by a comma:\n""").strip()
UpperCAmelCase_ : Tuple = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
| 318
|
"""simple docstring"""
from __future__ import annotations
import queue
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = data
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : Dict = None
def _A () -> TreeNode:
"""simple docstring"""
print('''\n********Press N to stop entering at any point of time********\n''' )
SCREAMING_SNAKE_CASE_ : List[Any] = input('''Enter the value of the root node: ''' ).strip().lower()
SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TreeNode(int(__a ) )
q.put(__a )
while not q.empty():
SCREAMING_SNAKE_CASE_ : Optional[int] = q.get()
SCREAMING_SNAKE_CASE_ : List[str] = f'Enter the left node of {node_found.data}: '
SCREAMING_SNAKE_CASE_ : Optional[int] = input(__a ).strip().lower() or '''n'''
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE_ : List[str] = TreeNode(int(__a ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = left_node
q.put(__a )
SCREAMING_SNAKE_CASE_ : str = f'Enter the right node of {node_found.data}: '
SCREAMING_SNAKE_CASE_ : str = input(__a ).strip().lower() or '''n'''
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE_ : Any = TreeNode(int(__a ) )
SCREAMING_SNAKE_CASE_ : int = right_node
q.put(__a )
    raise  # should never be reached: the loop only exits by returning once the user enters "n"
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
print(node.data , end=''',''' )
pre_order(node.left )
pre_order(node.right )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
in_order(node.left )
print(node.data , end=''',''' )
in_order(node.right )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=''',''' )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue()
q.put(__a )
while not q.empty():
SCREAMING_SNAKE_CASE_ : Tuple = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue()
q.put(__a )
while not q.empty():
SCREAMING_SNAKE_CASE_ : str = []
while not q.empty():
SCREAMING_SNAKE_CASE_ : List[str] = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__a )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ : list[TreeNode] = []
SCREAMING_SNAKE_CASE_ : Union[str, Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=''',''' )
stack.append(__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = n.left
# end of while means current node doesn't have left child
SCREAMING_SNAKE_CASE_ : Tuple = stack.pop()
# start to traverse its right child
SCREAMING_SNAKE_CASE_ : str = n.right
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ : list[TreeNode] = []
SCREAMING_SNAKE_CASE_ : Any = node
while n or stack:
while n:
stack.append(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.left
SCREAMING_SNAKE_CASE_ : Any = stack.pop()
print(n.data , end=''',''' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.right
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = [], []
SCREAMING_SNAKE_CASE_ : List[Any] = node
stacka.append(__a )
while stacka: # to find the reversed order of post order, store it in stack2
SCREAMING_SNAKE_CASE_ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__a )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=''',''' )
def _A (__a = "" , __a=50 , __a="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(width - len(__a ) - 2 , 2 )
return f'{left * char} {s} {(left + extra) * char}'
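# Example (illustrative): prompt("Binary Tree Traversals") pads the title to a
# 50-character banner, i.e. "*" * 13 + " Binary Tree Traversals " + "*" * 13.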
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
UpperCAmelCase_ : TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 318
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase_ : int = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase_ : Optional[int] = {
"""camembert-base""": 512,
}
UpperCAmelCase_ : Union[str, Any] = """▁"""
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = CamembertTokenizer
def __init__( self : Dict , lowercase_ : Dict=None , lowercase_ : int=None , lowercase_ : Union[str, Any]="<s>" , lowercase_ : Dict="</s>" , lowercase_ : str="</s>" , lowercase_ : Dict="<s>" , lowercase_ : Union[str, Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Optional[int]="<mask>" , lowercase_ : int=["<s>NOTUSED", "</s>NOTUSED"] , **lowercase_ : List[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_) if isinstance(lowercase_ , lowercase_) else mask_token
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_file
SCREAMING_SNAKE_CASE_ : List[Any] = False if not self.vocab_file else True
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : Optional[str] = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(lowercase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_):
copyfile(self.vocab_file , lowercase_)
return (out_vocab_file,)
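# --- Illustrative sketch (not part of the original module) ---
# Typical round trip with the fast tokenizer defined above (here imported
# from the released transformers package, since the class in this dump
# appears under a placeholder name); downloading the "camembert-base"
# checkpoint from PRETRAINED_VOCAB_FILES_MAP is assumed.
from transformers import CamembertTokenizerFast

tok = CamembertTokenizerFast.from_pretrained("camembert-base")
enc = tok("Le camembert est délicieux !")
print(enc["input_ids"])             # <s> ... </s> added by build_inputs_with_special_tokens
print(tok.decode(enc["input_ids"]))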
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = "ssube/stable-diffusion-x4-upscaler-onnx"
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any]=0):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_))
SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Tuple = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Tuple = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : int = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE_ : Optional[int] = False
return options
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128))
# using the PNDM scheduler by default
SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : int = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128))
SCREAMING_SNAKE_CASE_ : Tuple = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''')
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : int = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : int = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : List[str] = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
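# --- Illustrative sketch (not part of the original test module) ---
# The tests above boil down to this inference loop; the checkpoint, a CPU
# execution provider, and network access for the sample image are assumed.
from diffusers import OnnxStableDiffusionUpscalePipeline
from diffusers.utils import load_image

low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((128, 128))
pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
# x4 upscaler: a 128x128 input comes back as 512x512
result = pipe(prompt="A fantasy landscape", image=low_res, num_inference_steps=10)
result.images[0].save("upscaled.png")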
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "instructblip_vision_model"
def __init__( self : str , lowercase_ : Optional[int]=1408 , lowercase_ : str=6144 , lowercase_ : Optional[Any]=39 , lowercase_ : List[str]=16 , lowercase_ : Any=224 , lowercase_ : int=14 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Optional[int]=1e-6 , lowercase_ : Dict=0.0 , lowercase_ : Optional[Any]=1e-10 , lowercase_ : List[Any]=True , **lowercase_ : Any , ):
'''simple docstring'''
super().__init__(**lowercase_)
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE_ : str = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : str = attention_dropout
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[int] = qkv_bias
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Union[str, Any]):
'''simple docstring'''
cls._set_token_in_kwargs(lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = cls.get_config_dict(lowercase_ , **lowercase_)
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''') == "instructblip":
SCREAMING_SNAKE_CASE_ : List[str] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(lowercase_ , **lowercase_)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "instructblip_qformer"
def __init__( self : List[str] , lowercase_ : Union[str, Any]=30522 , lowercase_ : Union[str, Any]=768 , lowercase_ : int=12 , lowercase_ : Tuple=12 , lowercase_ : int=3072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Dict=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : str=512 , lowercase_ : List[str]=0.02 , lowercase_ : str=1e-12 , lowercase_ : Optional[Any]=0 , lowercase_ : Optional[int]="absolute" , lowercase_ : Dict=2 , lowercase_ : List[Any]=1408 , **lowercase_ : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_size
SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Any = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Dict = initializer_range
SCREAMING_SNAKE_CASE_ : int = layer_norm_eps
SCREAMING_SNAKE_CASE_ : List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE_ : int = cross_attention_frequency
SCREAMING_SNAKE_CASE_ : int = encoder_hidden_size
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Dict):
'''simple docstring'''
cls._set_token_in_kwargs(lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = cls.get_config_dict(lowercase_ , **lowercase_)
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''') == "instructblip":
SCREAMING_SNAKE_CASE_ : Dict = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(lowercase_ , **lowercase_)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "instructblip"
__UpperCamelCase = True
def __init__( self : List[Any] , lowercase_ : str=None , lowercase_ : Union[str, Any]=None , lowercase_ : Any=None , lowercase_ : int=32 , **lowercase_ : Any):
'''simple docstring'''
super().__init__(**lowercase_)
if vision_config is None:
SCREAMING_SNAKE_CASE_ : Dict = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''')
if qformer_config is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''')
if text_config is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''')
SCREAMING_SNAKE_CASE_ : Optional[int] = InstructBlipVisionConfig(**lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = InstructBlipQFormerConfig(**lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
SCREAMING_SNAKE_CASE_ : Optional[int] = CONFIG_MAPPING[text_model_type](**lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.text_config.tie_word_embeddings
SCREAMING_SNAKE_CASE_ : Dict = self.text_config.is_encoder_decoder
SCREAMING_SNAKE_CASE_ : List[str] = num_query_tokens
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.vision_config.hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ : Any = 1.0
SCREAMING_SNAKE_CASE_ : List[Any] = 0.02
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any , lowercase_ : InstructBlipVisionConfig , lowercase_ : InstructBlipQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : int , ):
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = copy.deepcopy(self.__dict__)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE_ : Tuple = self.qformer_config.to_dict()
SCREAMING_SNAKE_CASE_ : Any = self.text_config.to_dict()
SCREAMING_SNAKE_CASE_ : List[str] = self.__class__.model_type
return output
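# --- Illustrative sketch (not part of the original module) ---
# Composing the three sub-configs above (class names as in the released
# transformers API; the definitions in this dump appear under placeholder
# names). Empty sub-configs fall back to the defaults logged in __init__.
from transformers import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig

vision_cfg = InstructBlipVisionConfig()      # ViT-style defaults
qformer_cfg = InstructBlipQFormerConfig()    # BERT-style defaults
full_cfg = InstructBlipConfig(
    vision_config=vision_cfg.to_dict(),
    qformer_config=qformer_cfg.to_dict(),
    text_config=None,                        # falls back to OPTConfig
    num_query_tokens=32,
)
print(full_cfg.to_dict()["model_type"])      # "instructblip"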
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
UpperCAmelCase_ : List[Any] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
UpperCAmelCase_ : Optional[int] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
UpperCAmelCase_ : Tuple = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=False):
'''simple docstring'''
if return_pvalue:
SCREAMING_SNAKE_CASE_ : int = pearsonr(lowercase_ , lowercase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowercase_ , lowercase_)[0])}
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : int = 13 , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 3 , lowercase_ : int = 3 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : int = 128 , lowercase_ : Tuple=[16, 32, 64, 128] , lowercase_ : int = 7 , lowercase_ : int = 4 , lowercase_ : int = 37 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 10 , lowercase_ : float = 0.02 , lowercase_ : int = 2 , lowercase_ : int = 1 , lowercase_ : int = 128 , lowercase_ : List[int] = [2, 2, 2, 2] , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = parent
SCREAMING_SNAKE_CASE_ : str = batch_size
SCREAMING_SNAKE_CASE_ : List[str] = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE_ : int = num_channels
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_labels
SCREAMING_SNAKE_CASE_ : int = hidden_size
SCREAMING_SNAKE_CASE_ : str = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Dict = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : int = encoder_stride
SCREAMING_SNAKE_CASE_ : Any = num_attention_outputs
SCREAMING_SNAKE_CASE_ : Optional[Any] = embed_dim
SCREAMING_SNAKE_CASE_ : str = embed_dim + 1
SCREAMING_SNAKE_CASE_ : Dict = resolution
SCREAMING_SNAKE_CASE_ : int = depths
SCREAMING_SNAKE_CASE_ : str = hidden_sizes
SCREAMING_SNAKE_CASE_ : int = dim
SCREAMING_SNAKE_CASE_ : int = mlp_expansion_ratio
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = TFEfficientFormerModel(config=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , training=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[int] = TFEfficientFormerForImageClassification(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = model(lowercase_ , labels=lowercase_ , training=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1
SCREAMING_SNAKE_CASE_ : Tuple = TFEfficientFormerForImageClassification(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ : Dict = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = config_and_inputs
SCREAMING_SNAKE_CASE_ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__UpperCamelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = TFEfficientFormerModelTester(self)
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(lowercase_)
SCREAMING_SNAKE_CASE_ : str = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
def check_hidden_states_output(lowercase_ : str , lowercase_ : int , lowercase_ : List[Any]):
SCREAMING_SNAKE_CASE_ : Tuple = model_class(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_) , training=lowercase_)
SCREAMING_SNAKE_CASE_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(lowercase_) , lowercase_)
if hasattr(self.model_tester , '''encoder_seq_length'''):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
SCREAMING_SNAKE_CASE_ : List[Any] = seq_length * self.model_tester.chunk_length
else:
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Any = outputs.decoder_hidden_states
                self.assertIsInstance(lowercase_ , (list, tuple))
self.assertEqual(len(lowercase_) , lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = getattr(self.model_tester , '''seq_length''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = getattr(self.model_tester , '''decoder_seq_length''' , lowercase_)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : str = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : List[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : int=False):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : List[Any] = TFEfficientFormerModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Tuple = getattr(self.model_tester , '''seq_length''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester , '''encoder_seq_length''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester , '''key_length''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = getattr(self.model_tester , '''chunk_length''' , lowercase_)
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
SCREAMING_SNAKE_CASE_ : Dict = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : Tuple = model_class(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_) , training=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase_) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : List[str] = model_class(lowercase_)
SCREAMING_SNAKE_CASE_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_) , training=lowercase_)
SCREAMING_SNAKE_CASE_ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase_) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
SCREAMING_SNAKE_CASE_ : Dict = model_class(lowercase_)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowercase_)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowercase_)
self.assertTrue(outputs_dict is not None)
def _A () -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
SCREAMING_SNAKE_CASE_ : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE_ : List[str] = prepare_img()
SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(images=lowercase_ , return_tensors='''tf''')
# forward pass
SCREAMING_SNAKE_CASE_ : int = model(**lowercase_ , training=lowercase_)
# verify the logits
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant([-0.05_55, 0.48_25, -0.08_52])
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''')
SCREAMING_SNAKE_CASE_ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE_ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE_ : Dict = image_processor(images=lowercase_ , return_tensors='''tf''')
# forward pass
SCREAMING_SNAKE_CASE_ : Dict = model(**lowercase_ , training=lowercase_)
# verify the logits
SCREAMING_SNAKE_CASE_ : Any = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = tf.constant([-0.13_12, 0.43_53, -1.04_99])
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
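# --- Illustrative sketch (not part of the original test module) ---
# The integration tests above reduce to this loop (upstream class names;
# checkpoint download is assumed, and the image path is the test fixture
# referenced above):
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
proc = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
logits = model(**proc(images=image, return_tensors="tf")).logits
print(logits.shape)  # (1, 1000) ImageNet-1k classes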
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : Dict[str, int] , lowercase_ : List[str] , lowercase_ : int = None , lowercase_ : int = None):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : str = pad_token_id
SCREAMING_SNAKE_CASE_ : Optional[int] = max_length
SCREAMING_SNAKE_CASE_ : Dict = vocab
SCREAMING_SNAKE_CASE_ : Dict = merges
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BytePairTokenizer(lowercase_ , lowercase_ , sequence_length=lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : GPTaTokenizer , *lowercase_ : Optional[Any] , **lowercase_ : Tuple):
'''simple docstring'''
        SCREAMING_SNAKE_CASE_ : Any = [''' '''.join(m) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE_ : str = tokenizer.get_vocab()
return cls(lowercase_ , lowercase_ , *lowercase_ , **lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : Union[str, os.PathLike] , *lowercase_ : List[str] , **lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ , *lowercase_ , **lowercase_)
return cls.from_tokenizer(lowercase_ , *lowercase_ , **lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : List[Any]):
'''simple docstring'''
return cls(**lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any] , lowercase_ : int = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.tf_tokenizer(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.ones_like(lowercase_)
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = pad_model_inputs(
lowercase_ , max_seq_length=lowercase_ , pad_value=self.pad_token_id)
return {"attention_mask": attention_mask, "input_ids": input_ids}
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def _A (__a , __a = "cpu" , __a = None ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = torch.load(__a , map_location=__a )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__a , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = v.half()
if save_path is None: # overwrite src_path
SCREAMING_SNAKE_CASE_ : Union[str, Any] = src_path
torch.save(__a , __a )
if __name__ == "__main__":
fire.Fire(convert)
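# --- Illustrative sketch (not part of the original script) ---
# fire.Fire(convert) turns the function above into a CLI (script name
# hypothetical):
#
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# or, called in-process:
#
#   torch.save({"weight": torch.randn(4, 4)}, "toy_model.bin")  # toy fp32 checkpoint
#   convert("toy_model.bin", save_path="toy_model_fp16.bin")
#   assert torch.load("toy_model_fp16.bin")["weight"].dtype == torch.float16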
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
SCREAMING_SNAKE_CASE_ : Dict = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , lowercase_)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowercase_ : str):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowercase_ : List[Any]):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : str):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
SCREAMING_SNAKE_CASE_ : Dict = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
processor_slow.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
processor_fast.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , lowercase_)
self.assertIsInstance(processor_fast.tokenizer , lowercase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , lowercase_)
self.assertIsInstance(processor_fast.image_processor , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''')
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor(do_normalize=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=lowercase_)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Any = image_processor(lowercase_ , return_tensors='''np''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowercase_ , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : int = processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.batch_decode(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Dict = processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
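# --- Illustrative sketch (not part of the original test module) ---
# Outside the tests, the processor pairs a Chinese BERT tokenizer with a
# CLIP-style image processor; the checkpoint name below is the public
# ChineseCLIP release and its download is assumed.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
img = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="一张测试图片", images=img, return_tensors="np")
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']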
"""simple docstring"""
class Graph:
    '''simple docstring'''
    def __init__(self):
        '''simple docstring'''
        self.vertex = {}

    def print_graph(self) -> None:
        '''simple docstring'''
        print(self.vertex)
        for i in self.vertex:
            print(i , ''' -> ''' , ''' -> '''.join([str(j) for j in self.vertex[i]]))

    def add_edge(self , from_vertex: int , to_vertex: int) -> None:
        '''simple docstring'''
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        '''simple docstring'''
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i , visited)

    def dfs_recursive(self , start_vertex: int , visited: list) -> None:
        '''simple docstring'''
        visited[start_vertex] = True
        print(start_vertex , end=''' ''')
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited)
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
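# --- Illustrative sketch (not part of the original module) ---
# The same traversal without recursion: an explicit stack over a plain
# adjacency dict shaped like Graph.vertex, started from a single vertex
# rather than looping over all components as Graph.dfs does.
def demo_dfs_iterative(adj, start):
    visited, order, stack = set(), [], [start]
    while stack:
        v = stack.pop()
        if v in visited:
            continue
        visited.add(v)
        order.append(v)
        # push neighbours reversed so lower-numbered ones are popped first
        stack.extend(reversed(adj.get(v, [])))
    return order


assert demo_dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]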
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "rwkv"
__UpperCamelCase = {"max_position_embeddings": "context_length"}
def __init__( self : Union[str, Any] , lowercase_ : Any=50277 , lowercase_ : str=1024 , lowercase_ : List[str]=4096 , lowercase_ : Optional[Any]=32 , lowercase_ : Any=None , lowercase_ : Any=None , lowercase_ : List[Any]=1e-5 , lowercase_ : Union[str, Any]=0 , lowercase_ : Union[str, Any]=0 , lowercase_ : int=6 , lowercase_ : Tuple=False , lowercase_ : Any=True , **lowercase_ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Any = context_length
SCREAMING_SNAKE_CASE_ : int = hidden_size
SCREAMING_SNAKE_CASE_ : int = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size
SCREAMING_SNAKE_CASE_ : int = intermediate_size if intermediate_size is not None else 4 * hidden_size
SCREAMING_SNAKE_CASE_ : int = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_every
SCREAMING_SNAKE_CASE_ : Dict = use_cache
SCREAMING_SNAKE_CASE_ : Dict = bos_token_id
SCREAMING_SNAKE_CASE_ : Any = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
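# --- Illustrative sketch (not part of the original module) ---
# The fallbacks above are easy to see on a small config (upstream class
# name assumed): leaving attention_hidden_size and intermediate_size unset
# makes them track hidden_size and 4 * hidden_size.
from transformers import RwkvConfig

cfg = RwkvConfig(vocab_size=1000, hidden_size=256, num_hidden_layers=4)
assert cfg.attention_hidden_size == 256
assert cfg.intermediate_size == 4 * 256
print(cfg.context_length)  # 1024, the default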
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # multiple-choice inputs carry an extra `choice` axis
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
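
# Usage sketch (added for illustration):
#     config = DistilBertConfig(n_layers=4, dim=512, hidden_dim=2048)
#     assert config.hidden_size == 512  # `hidden_size` aliases `dim` via attribute_map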
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = 8.3_1_4_4_5_9_8
def _A (__a , __a ) -> float:
"""simple docstring"""
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
UpperCAmelCase_ : str = 300
UpperCAmelCase_ : str = 28
UpperCAmelCase_ : Any = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
"""simple docstring"""
from __future__ import annotations
def _A (__a , __a = None , __a = None , __a = False , ) -> tuple[int, float, str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = cipher_alphabet or [chr(__a ) for i in range(97 , 1_23 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
SCREAMING_SNAKE_CASE_ : List[Any] = {
'''a''': 0.0_84_97,
'''b''': 0.0_14_92,
'''c''': 0.0_22_02,
'''d''': 0.0_42_53,
'''e''': 0.1_11_62,
'''f''': 0.0_22_28,
'''g''': 0.0_20_15,
'''h''': 0.0_60_94,
'''i''': 0.0_75_46,
'''j''': 0.0_01_53,
'''k''': 0.0_12_92,
'''l''': 0.0_40_25,
'''m''': 0.0_24_06,
'''n''': 0.0_67_49,
'''o''': 0.0_75_07,
'''p''': 0.0_19_29,
'''q''': 0.0_00_95,
'''r''': 0.0_75_87,
'''s''': 0.0_63_27,
'''t''': 0.0_93_56,
'''u''': 0.0_27_58,
'''v''': 0.0_09_78,
'''w''': 0.0_25_60,
'''x''': 0.0_01_50,
'''y''': 0.0_19_94,
'''z''': 0.0_00_77,
}
else:
# Custom frequencies dictionary
SCREAMING_SNAKE_CASE_ : int = frequencies_dict
if not case_sensitive:
SCREAMING_SNAKE_CASE_ : Dict = ciphertext.lower()
# Chi squared statistic values
SCREAMING_SNAKE_CASE_ : dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(__a ) ):
SCREAMING_SNAKE_CASE_ : List[str] = ''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
SCREAMING_SNAKE_CASE_ : Optional[int] = (alphabet_letters.index(letter.lower() ) - shift) % len(
__a )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
SCREAMING_SNAKE_CASE_ : Any = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
SCREAMING_SNAKE_CASE_ : Optional[Any] = decrypted_with_shift.lower().count(__a )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
SCREAMING_SNAKE_CASE_ : List[Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
SCREAMING_SNAKE_CASE_ : Optional[Any] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
SCREAMING_SNAKE_CASE_ : Tuple = decrypted_with_shift.count(__a )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
SCREAMING_SNAKE_CASE_ : List[str] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
SCREAMING_SNAKE_CASE_ : List[Any] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
SCREAMING_SNAKE_CASE_ : Optional[int] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(__a ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
SCREAMING_SNAKE_CASE_ : int = min(
__a , key=__a , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) : List[Any] = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
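
if __name__ == "__main__":
    # Illustrative check (added): "hello world" Caesar-shifted by 3 is "khoor zruog".
    # Chi-squared scoring is statistical, so very short ciphertexts can be
    # misranked; longer English text recovers the shift reliably.
    best_shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("khoor zruog")
    print(best_shift, round(chi_squared, 3), decoded)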
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies
def __iter__( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
for task in range(self.n_tasks):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''')
for task in range(self.n_tasks):
for _ in range(self.n_copies):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = start_length
SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
SCREAMING_SNAKE_CASE_ : Tuple = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
return all(lowercase_)
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = re.split('''(%s)''' % '''|'''.join(__a ) , __a )
# last string should be ""
return "".join(string_list[:-2] )
def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__a ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1]
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes(
__a , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__a , __a ):
gen_token_dict[task].append(__a )
SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
code_gens[task].append(remove_last_block(__a ) )
return code_gens
def _A () -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a )
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE_ : str = '''false'''
if args.num_workers is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
set_seed(args.seed , device_specific=__a )
# Load model and tokenizer
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE_ : List[str] = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' )
SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' )
SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a )
SCREAMING_SNAKE_CASE_ : List[Any] = complete_code(
__a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE_ : int = []
for task in tqdm(range(__a ) ):
SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test''']
SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})'
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute(
references=__a , predictions=__a , num_workers=args.num_workers )
print(f'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__a , __a )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
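
# Example invocation (added for illustration; the exact flags come from
# `HumanEvalArguments` in the accompanying `arguments.py`, so treat these as placeholders):
#     accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot \
#         --do_sample True --temperature 0.2 --n_samples 200 --HF_ALLOW_CODE_EVAL 1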
"""simple docstring"""
def _A (__a ) -> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = [0 for i in range(len(__a ) )]
# initialize interval's left pointer and right pointer
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = 0, 0
for i in range(1 , len(__a ) ):
# case when current index is inside the interval
if i <= right_pointer:
SCREAMING_SNAKE_CASE_ : str = min(right_pointer - i + 1 , z_result[i - left_pointer] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = min_edge
while go_next(__a , __a , __a ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = i, i + z_result[i] - 1
return z_result
def _A (__a , __a , __a ) -> bool:
"""simple docstring"""
return i + z_result[i] < len(__a ) and s[z_result[i]] == s[i + z_result[i]]
def _A (__a , __a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
SCREAMING_SNAKE_CASE_ : Union[str, Any] = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(__a ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
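
if __name__ == "__main__":  # extra illustrative checks (added)
    print(z_function("abacaba"))  # [0, 0, 1, 0, 3, 0, 1]
    print(find_pattern("aba", "abacaba"))  # 2: matches at positions 0 and 4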
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["image_processor", "feature_extractor"]
__UpperCamelCase = "TvltImageProcessor"
__UpperCamelCase = "TvltFeatureExtractor"
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]):
'''simple docstring'''
super().__init__(image_processor=lowercase_ , feature_extractor=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor
SCREAMING_SNAKE_CASE_ : Optional[Any] = feature_extractor
def __call__( self : Any , lowercase_ : str=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : str=None , lowercase_ : int=False , lowercase_ : Union[str, Any]=False , *lowercase_ : List[Any] , **lowercase_ : List[str] , ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''')
SCREAMING_SNAKE_CASE_ : Any = None
if images is not None:
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor(lowercase_ , mask_pixel=lowercase_ , *lowercase_ , **lowercase_)
if images_mixed is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor(lowercase_ , is_mixed=lowercase_ , *lowercase_ , **lowercase_)
if audio is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(
lowercase_ , *lowercase_ , sampling_rate=lowercase_ , mask_audio=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = {}
if audio is not None:
output_dict.update(lowercase_)
if images is not None:
output_dict.update(lowercase_)
if images_mixed_dict is not None:
output_dict.update(lowercase_)
return output_dict
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor.model_input_names
SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
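
# Usage sketch (added for illustration; `video_frames` and `waveform` are placeholder inputs):
#     from transformers import TvltImageProcessor, TvltFeatureExtractor
#     processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
#     batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)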
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCAmelCase_ : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
UpperCAmelCase_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "SpeechT5FeatureExtractor"
__UpperCamelCase = "SpeechT5Tokenizer"
def __init__( self : Any , lowercase_ : Dict , lowercase_ : Optional[Any]):
'''simple docstring'''
super().__init__(lowercase_ , lowercase_)
def __call__( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''audio''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('''text''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('''text_target''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''audio_target''' , lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''sampling_rate''' , lowercase_)
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')
if audio is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_)
elif text is not None:
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(lowercase_ , **lowercase_)
else:
SCREAMING_SNAKE_CASE_ : Any = None
if audio_target is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor(audio_target=lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = targets['''input_values''']
elif text_target is not None:
SCREAMING_SNAKE_CASE_ : int = self.tokenizer(lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = targets['''input_ids''']
else:
SCREAMING_SNAKE_CASE_ : int = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE_ : Any = decoder_attention_mask
return inputs
def _SCREAMING_SNAKE_CASE ( self : Tuple , *lowercase_ : Tuple , **lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''input_values''' , lowercase_)
SCREAMING_SNAKE_CASE_ : int = kwargs.pop('''input_ids''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''labels''' , lowercase_)
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''')
if input_values is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_)
elif input_ids is not None:
SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer.pad(lowercase_ , **lowercase_)
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
if labels is not None:
if "input_ids" in labels or (isinstance(lowercase_ , lowercase_) and "input_ids" in labels[0]):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer.pad(lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = targets['''input_ids''']
else:
SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.feature_size
SCREAMING_SNAKE_CASE_ : Optional[int] = self.feature_extractor.num_mel_bins
SCREAMING_SNAKE_CASE_ : str = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : str = feature_size_hack
SCREAMING_SNAKE_CASE_ : Dict = targets['''input_values''']
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE_ : Dict = labels
SCREAMING_SNAKE_CASE_ : List[str] = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = decoder_attention_mask
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : Tuple):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowercase_ : Dict , **lowercase_ : List[Any]):
'''simple docstring'''
return self.tokenizer.decode(*lowercase_ , **lowercase_)
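
# Usage sketch (added for illustration; `waveform` is a placeholder array and the
# tokenizer needs a real SentencePiece model file):
#     processor = SpeechT5Processor(SpeechT5FeatureExtractor(), SpeechT5Tokenizer("spm_char.model"))
#     batch = processor(text="hello world", audio_target=waveform, sampling_rate=16000)
#     # `batch` now carries `input_ids`, `labels`, and `decoder_attention_mask` for TTS training.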
"""simple docstring"""
from __future__ import annotations
import math
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = size
# approximate the overall size of segment tree with given value
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [0 for i in range(0 , 4 * size)]
# create array to store lazy update
SCREAMING_SNAKE_CASE_ : List[Any] = [0 for i in range(0 , 4 * size)]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [0 for i in range(0 , 4 * size)] # flag for lazy update
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : int):
'''simple docstring'''
return idx * 2
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int):
'''simple docstring'''
return idx * 2 + 1
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : list[int]):
'''simple docstring'''
if left_element == right_element:
SCREAMING_SNAKE_CASE_ : Any = a[left_element - 1]
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = (left_element + right_element) // 2
self.build(self.left(lowercase_) , lowercase_ , lowercase_ , lowercase_)
self.build(self.right(lowercase_) , mid + 1 , lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : str = max(
self.segment_tree[self.left(lowercase_)] , self.segment_tree[self.right(lowercase_)])
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int):
'''simple docstring'''
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.lazy[idx]
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE_ : int = self.lazy[idx]
SCREAMING_SNAKE_CASE_ : int = self.lazy[idx]
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : List[Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val
if left_element != right_element:
SCREAMING_SNAKE_CASE_ : str = val
SCREAMING_SNAKE_CASE_ : int = val
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
return True
SCREAMING_SNAKE_CASE_ : Tuple = (left_element + right_element) // 2
self.update(self.left(lowercase_) , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
self.update(self.right(lowercase_) , mid + 1 , lowercase_ , lowercase_ , lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = max(
self.segment_tree[self.left(lowercase_)] , self.segment_tree[self.right(lowercase_)])
return True
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int):
'''simple docstring'''
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE_ : Tuple = self.lazy[idx]
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE_ : Tuple = self.lazy[idx]
SCREAMING_SNAKE_CASE_ : List[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : str = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
SCREAMING_SNAKE_CASE_ : Optional[int] = (left_element + right_element) // 2
SCREAMING_SNAKE_CASE_ : Any = self.query(self.left(lowercase_) , lowercase_ , lowercase_ , lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.query(self.right(lowercase_) , mid + 1 , lowercase_ , lowercase_ , lowercase_)
return max(lowercase_ , lowercase_)
def __str__( self : Union[str, Any]):
'''simple docstring'''
return str([self.query(1 , 1 , self.size , lowercase_ , lowercase_) for i in range(1 , self.size + 1)])
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
UpperCAmelCase_ : int = 15
UpperCAmelCase_ : List[str] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def _A (__a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.inf
def set_batch_size(__a ) -> None:
nonlocal batch_size
if isinstance(__a , __a ):
SCREAMING_SNAKE_CASE_ : Tuple = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__a , __a ):
SCREAMING_SNAKE_CASE_ : int = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__a , __a ) and feature.dtype == "binary":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__a , __a )
return None if batch_size is np.inf else batch_size
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Optional[int] , ):
'''simple docstring'''
super().__init__(
lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , )
SCREAMING_SNAKE_CASE_ : Any = path_or_paths if isinstance(lowercase_ , lowercase_) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE_ : Any = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Parquet(
cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , hash=lowercase_ , **lowercase_ , )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE_ : str = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : Dict = None
self.builder.download_and_prepare(
download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE_ : Any = self.builder.as_dataset(
split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory)
return dataset
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : Dataset , lowercase_ : Union[PathLike, BinaryIO] , lowercase_ : Optional[int] = None , **lowercase_ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = dataset
SCREAMING_SNAKE_CASE_ : Dict = path_or_buf
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size or get_writer_batch_size(dataset.features)
SCREAMING_SNAKE_CASE_ : Any = parquet_writer_kwargs
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
with open(self.path_or_buf , '''wb+''') as buffer:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._write(file_obj=lowercase_ , batch_size=lowercase_ , **self.parquet_writer_kwargs)
else:
SCREAMING_SNAKE_CASE_ : str = self._write(file_obj=self.path_or_buf , batch_size=lowercase_ , **self.parquet_writer_kwargs)
return written
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : BinaryIO , lowercase_ : int , **lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = parquet_writer_kwargs.pop('''path_or_buf''' , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.dataset.features.arrow_schema
SCREAMING_SNAKE_CASE_ : Tuple = pq.ParquetWriter(lowercase_ , schema=lowercase_ , **lowercase_)
for offset in logging.tqdm(
range(0 , len(self.dataset) , lowercase_) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
SCREAMING_SNAKE_CASE_ : List[Any] = query_table(
table=self.dataset._data , key=slice(lowercase_ , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(lowercase_)
written += batch.nbytes
writer.close()
return written
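
# Usage sketch (added for illustration): round-tripping a small dataset through Parquet.
#     from datasets import Dataset
#     ds = Dataset.from_dict({"text": ["a", "b"]})
#     ParquetDatasetWriter(ds, "tiny.parquet").write()
#     reloaded = ParquetDatasetReader("tiny.parquet", split="train").read()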
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _A (__a , __a , __a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertConfig.from_json_file(__a )
print(f'Building PyTorch model from configuration: {config}' )
SCREAMING_SNAKE_CASE_ : str = BertForPreTraining(__a )
# Load weights from tf checkpoint
load_tf_weights_in_bert(__a , __a , __a )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , __a )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
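
# Example invocation (added for illustration; paths are placeholders):
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin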
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = ["""model.decoder.embed_positions.weights"""]
def _A (__a ) -> Dict:
"""simple docstring"""
if "emb" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
SCREAMING_SNAKE_CASE_ : Any = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
SCREAMING_SNAKE_CASE_ : Dict = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def _A (__a , __a ) -> Tuple[Dict, Dict]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(state_dict.keys() )
SCREAMING_SNAKE_CASE_ : int = {}
for key in keys:
SCREAMING_SNAKE_CASE_ : int = state_dict.pop(__a )
SCREAMING_SNAKE_CASE_ : int = rename_keys(__a )
if "in_proj_weight" in key:
# split fused qkv proj
SCREAMING_SNAKE_CASE_ : List[str] = val[:hidden_size, :]
SCREAMING_SNAKE_CASE_ : List[str] = val[hidden_size : 2 * hidden_size, :]
SCREAMING_SNAKE_CASE_ : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
SCREAMING_SNAKE_CASE_ : int = val
else:
SCREAMING_SNAKE_CASE_ : Any = val
return state_dict, enc_dec_proj_state_dict
def _A (__a ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
SCREAMING_SNAKE_CASE_ : Optional[int] = 10_24
SCREAMING_SNAKE_CASE_ : Tuple = 24
SCREAMING_SNAKE_CASE_ : Optional[Any] = 16
elif checkpoint == "medium":
SCREAMING_SNAKE_CASE_ : List[str] = 15_36
SCREAMING_SNAKE_CASE_ : Optional[int] = 48
SCREAMING_SNAKE_CASE_ : Optional[int] = 24
elif checkpoint == "large":
SCREAMING_SNAKE_CASE_ : Optional[Any] = 20_48
SCREAMING_SNAKE_CASE_ : Optional[int] = 48
SCREAMING_SNAKE_CASE_ : int = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
SCREAMING_SNAKE_CASE_ : List[Any] = MusicgenDecoderConfig(
hidden_size=__a , ffn_dim=hidden_size * 4 , num_hidden_layers=__a , num_attention_heads=__a , )
return config
@torch.no_grad()
def _A (__a , __a=None , __a=None , __a="cpu" ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = MusicGen.get_pretrained(__a , device=__a )
SCREAMING_SNAKE_CASE_ : Dict = decoder_config_from_checkpoint(__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = fairseq_model.lm.state_dict()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = rename_state_dict(
__a , hidden_size=decoder_config.hidden_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = TaEncoderModel.from_pretrained('''t5-base''' )
SCREAMING_SNAKE_CASE_ : List[str] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
SCREAMING_SNAKE_CASE_ : int = MusicgenForCausalLM(__a ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = decoder.load_state_dict(__a , strict=__a )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__a )
if len(__a ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(__a ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
SCREAMING_SNAKE_CASE_ : str = MusicgenForConditionalGeneration(text_encoder=__a , audio_encoder=__a , decoder=__a )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__a )
# check we can do a forward pass
SCREAMING_SNAKE_CASE_ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(input_ids=__a , decoder_input_ids=__a ).logits
if logits.shape != (8, 1, 20_48):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained('''t5-base''' )
SCREAMING_SNAKE_CASE_ : str = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
SCREAMING_SNAKE_CASE_ : Tuple = MusicgenProcessor(feature_extractor=__a , tokenizer=__a )
# set the appropriate bos/pad token ids
SCREAMING_SNAKE_CASE_ : str = 20_48
SCREAMING_SNAKE_CASE_ : List[Any] = 20_48
# set other default generation config params
SCREAMING_SNAKE_CASE_ : int = int(30 * audio_encoder.config.frame_rate )
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3.0
if pytorch_dump_folder is not None:
Path(__a ).mkdir(exist_ok=__a )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(__a )
processor.save_pretrained(__a )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(__a )
processor.push_to_hub(__a )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
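
# Example invocation (added for illustration; the script filename is assumed):
#     python convert_musicgen_transformers.py --checkpoint small \
#         --pytorch_dump_folder ./musicgen-small --device cpu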
"""simple docstring"""
from __future__ import annotations
def _A (__a ) -> list[int]: # This function is recursive
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(__a )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
SCREAMING_SNAKE_CASE_ : List[str] = array[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : Tuple = [element for element in array[i:] if element >= array[i]]
SCREAMING_SNAKE_CASE_ : str = longest_subsequence(__a )
if len(__a ) > len(__a ):
SCREAMING_SNAKE_CASE_ : Optional[int] = temp_array
else:
i += 1
SCREAMING_SNAKE_CASE_ : Dict = [element for element in array[1:] if element >= pivot]
SCREAMING_SNAKE_CASE_ : List[str] = [pivot, *longest_subsequence(__a )]
if len(__a ) > len(__a ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
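
if __name__ == "__main__":  # extra illustrative check (added)
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))
    # expected: [10, 22, 33, 41, 60, 80]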
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def _A (__a ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def _A (__a ) -> np.ndarray:
"""simple docstring"""
return (gray > 1_27) & (gray <= 2_55)
def _A (__a , __a ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros_like(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
SCREAMING_SNAKE_CASE_ : Any = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
UpperCAmelCase_ : Dict = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
UpperCAmelCase_ : List[Any] = np.array(Image.open(lena_path))
# kernel to be applied
UpperCAmelCase_ : Any = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
UpperCAmelCase_ : Tuple = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
UpperCAmelCase_ : List[str] = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
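
# Illustrative check (added): dilating a single foreground pixel with the
# cross-shaped kernel above grows it into a plus sign.
#     tiny = np.zeros((5, 5)); tiny[2, 2] = 1
#     dilation(tiny, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
#     # the four direct neighbours of (2, 2) become 1 as well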
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = (EulerDiscreteScheduler,)
__UpperCamelCase = 1_0
def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowercase_)
return config
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02]):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Any = scheduler_class(**lowercase_)
scheduler.set_timesteps(self.num_inference_steps)
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : str = self.dummy_model()
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ : str = sample.to(lowercase_)
for i, t in enumerate(scheduler.timesteps):
SCREAMING_SNAKE_CASE_ : str = scheduler.scale_model_input(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = model(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE_ : Any = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 10.08_07) < 1e-2
assert abs(result_mean.item() - 0.01_31) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config(prediction_type='''v_prediction''')
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_)
scheduler.set_timesteps(self.num_inference_steps)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_model()
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ : Optional[Any] = sample.to(lowercase_)
for i, t in enumerate(scheduler.timesteps):
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.scale_model_input(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = output.prev_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Tuple = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 0.00_02) < 1e-2
assert abs(result_mean.item() - 2.2_676e-06) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_)
scheduler.set_timesteps(self.num_inference_steps , device=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Any = self.dummy_model()
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE_ : List[Any] = sample.to(lowercase_)
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ : Tuple = scheduler.scale_model_input(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = model(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE_ : Any = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Dict = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 10.08_07) < 1e-2
assert abs(result_mean.item() - 0.01_31) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : List[str] = scheduler_class(**lowercase_ , use_karras_sigmas=lowercase_)
scheduler.set_timesteps(self.num_inference_steps , device=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE_ : int = sample.to(lowercase_)
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.scale_model_input(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : int = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE_ : str = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19) < 1e-2
assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63) < 1e-3
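
# Usage sketch (added for illustration): the denoising loop these tests exercise,
# outside the test harness; `unet` is a placeholder for any noise-prediction model.
#     scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample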
"""simple docstring"""
from collections import defaultdict
def _A (__a , __a ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = first_str.lower().strip()
SCREAMING_SNAKE_CASE_ : List[Any] = second_str.lower().strip()
# Remove whitespace
SCREAMING_SNAKE_CASE_ : Dict = first_str.replace(''' ''' , '''''' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(__a ) != len(__a ):
return False
# Default values for count should be 0
SCREAMING_SNAKE_CASE_ : defaultdict[str, int] = defaultdict(__a )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(__a ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ : Any = input("""Enter the first string """).strip()
UpperCAmelCase_ : Optional[int] = input("""Enter the second string """).strip()
UpperCAmelCase_ : Union[str, Any] = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
def _A (__a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = torch.load(__a , map_location='''cpu''' )
if "model" in sd.keys():
SCREAMING_SNAKE_CASE_ : str = torch.load(__a , map_location='''cpu''' )['''model''']
# pop unnecessary weights
SCREAMING_SNAKE_CASE_ : Any = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE_ : List[str] = sd.pop(__a )
SCREAMING_SNAKE_CASE_ : List[str] = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE_ : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
SCREAMING_SNAKE_CASE_ : List[Any] = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
SCREAMING_SNAKE_CASE_ : str = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
SCREAMING_SNAKE_CASE_ : List[Any] = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
SCREAMING_SNAKE_CASE_ : List[Any] = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = torch.split(__a , depth // 3 , dim=0 )
SCREAMING_SNAKE_CASE_ : Dict = q
SCREAMING_SNAKE_CASE_ : List[str] = k
SCREAMING_SNAKE_CASE_ : List[str] = v
del sd[key]
return sd
@torch.no_grad()
def _A (__a , __a , __a=None ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = load_checkpoint(__a )
if config is not None:
SCREAMING_SNAKE_CASE_ : Any = OPTConfig.from_pretrained(__a )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = OPTConfig()
SCREAMING_SNAKE_CASE_ : List[str] = OPTModel(__a ).half().eval()
model.load_state_dict(__a )
# Check results
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
UpperCAmelCase_ : List[Any] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
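
# Example invocation (added for illustration; paths are placeholders):
#     python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
#         --fairseq_path ./model.pt --pytorch_dump_folder_path ./opt-converted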
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase_ : Optional[Any] = """docs/source/en/_toctree.yml"""
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
SCREAMING_SNAKE_CASE_ : List[Any] = [key for key, value in counts.items() if value > 1]
SCREAMING_SNAKE_CASE_ : int = []
for duplicate_key in duplicates:
SCREAMING_SNAKE_CASE_ : List[str] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(__a , key=lambda __a : s["title"].lower() )
def _A (__a=False ) -> Tuple:
"""simple docstring"""
with open(__a , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE_ : Dict = yaml.safe_load(f.read() )
# Get to the API doc
SCREAMING_SNAKE_CASE_ : Any = 0
while content[api_idx]["title"] != "API":
api_idx += 1
SCREAMING_SNAKE_CASE_ : str = content[api_idx]['''sections''']
# Then to the model doc
SCREAMING_SNAKE_CASE_ : List[Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
SCREAMING_SNAKE_CASE_ : Optional[int] = api_doc[model_idx]['''sections''']
SCREAMING_SNAKE_CASE_ : str = [(idx, section) for idx, section in enumerate(__a ) if '''sections''' in section]
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
for idx, modality_doc in modalities_docs:
SCREAMING_SNAKE_CASE_ : List[str] = modality_doc['''sections''']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
SCREAMING_SNAKE_CASE_ : str = True
if overwrite:
SCREAMING_SNAKE_CASE_ : Optional[int] = new_modality_doc
if diff:
if overwrite:
SCREAMING_SNAKE_CASE_ : List[Any] = model_doc
SCREAMING_SNAKE_CASE_ : int = api_doc
with open(__a , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase_ : Tuple = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
"""simple docstring"""
def _A (__a ) -> int:
"""simple docstring"""
assert (
isinstance(__a , __a ) and number_of_steps > 0
), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = 1, 1
for _ in range(number_of_steps - 1 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
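
if __name__ == "__main__":  # worked example (added)
    # Four steps can be climbed in five distinct ways:
    # 1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2.
    print(climb_stairs(4))  # 5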
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCAmelCase_ : int = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__UpperCamelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
__UpperCamelCase = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
__UpperCamelCase = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.task_name.lower()
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "train"
__UpperCamelCase = "dev"
__UpperCamelCase = "test"
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self : List[str] , lowercase_ : GlueDataTrainingArguments , lowercase_ : PreTrainedTokenizerBase , lowercase_ : Optional[int] = None , lowercase_ : Union[str, Split] = Split.train , lowercase_ : Optional[str] = None , ):
'''simple docstring'''
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , lowercase_ , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = args
SCREAMING_SNAKE_CASE_ : Optional[int] = glue_processors[args.task_name]()
SCREAMING_SNAKE_CASE_ : List[str] = glue_output_modes[args.task_name]
if isinstance(lowercase_ , lowercase_):
try:
SCREAMING_SNAKE_CASE_ : List[Any] = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''')
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
SCREAMING_SNAKE_CASE_ : List[Any] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : List[Any] = cached_features_file + '''.lock'''
with FileLock(lowercase_):
if os.path.exists(lowercase_) and not args.overwrite_cache:
SCREAMING_SNAKE_CASE_ : Tuple = time.time()
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.load(lowercase_)
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start)
else:
logger.info(F'Creating features from dataset file at {args.data_dir}')
if mode == Split.dev:
SCREAMING_SNAKE_CASE_ : Tuple = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.processor.get_test_examples(args.data_dir)
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
SCREAMING_SNAKE_CASE_ : List[str] = examples[:limit_length]
SCREAMING_SNAKE_CASE_ : List[Any] = glue_convert_examples_to_features(
lowercase_ , lowercase_ , max_length=args.max_seq_length , label_list=lowercase_ , output_mode=self.output_mode , )
SCREAMING_SNAKE_CASE_ : Any = time.time()
torch.save(self.features , lowercase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]')
def __len__( self : List[str]):
'''simple docstring'''
return len(self.features)
def __getitem__( self : Dict , lowercase_ : Tuple):
'''simple docstring'''
return self.features[i]
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
return self.label_list
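# A minimal sketch of the caching pattern used in the dataset above (the helper
# name is illustrative): the .lock file serialises access so that, in
# distributed training, only the first process builds the feature cache while
# the other processes block on the lock and then read the cached file.
def _load_or_build_cache(cache_file: str, build_fn):
    with FileLock(cache_file + ".lock"):
        if os.path.exists(cache_file):
            return torch.load(cache_file)
        features = build_fn()
        torch.save(features, cache_file)
        return features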
| 318
|
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase_ : List[str] = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _A (__a , __a , __a , __a ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _A (__a ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _A (__a ) -> Matrix | None:
"""simple docstring"""
if location := find_empty_location(__a ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__a , __a , __a , __a ):
SCREAMING_SNAKE_CASE_ : Tuple = digit
if sudoku(__a ) is not None:
return grid
SCREAMING_SNAKE_CASE_ : Any = 0
return None
def _A (__a ) -> None:
"""simple docstring"""
for row in grid:
for cell in row:
            print(cell , end=''' ''' )
print()
if __name__ == "__main__":
    # note: sudoku() solves the grid in place, so make a copy first if you need the unmodified grid for comparison
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
UpperCAmelCase_ : str = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 318
| 1
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase_ : Optional[Any] = """docs/source/en/_toctree.yml"""
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
SCREAMING_SNAKE_CASE_ : List[Any] = [key for key, value in counts.items() if value > 1]
SCREAMING_SNAKE_CASE_ : int = []
for duplicate_key in duplicates:
SCREAMING_SNAKE_CASE_ : List[str] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def _A (__a=False ) -> Tuple:
"""simple docstring"""
    with open('''docs/source/en/_toctree.yml''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE_ : Dict = yaml.safe_load(f.read() )
# Get to the API doc
SCREAMING_SNAKE_CASE_ : Any = 0
while content[api_idx]["title"] != "API":
api_idx += 1
SCREAMING_SNAKE_CASE_ : str = content[api_idx]['''sections''']
# Then to the model doc
SCREAMING_SNAKE_CASE_ : List[Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
SCREAMING_SNAKE_CASE_ : Optional[int] = api_doc[model_idx]['''sections''']
SCREAMING_SNAKE_CASE_ : str = [(idx, section) for idx, section in enumerate(__a ) if '''sections''' in section]
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
for idx, modality_doc in modalities_docs:
SCREAMING_SNAKE_CASE_ : List[str] = modality_doc['''sections''']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
SCREAMING_SNAKE_CASE_ : str = True
if overwrite:
SCREAMING_SNAKE_CASE_ : Optional[int] = new_modality_doc
if diff:
if overwrite:
SCREAMING_SNAKE_CASE_ : List[Any] = model_doc
SCREAMING_SNAKE_CASE_ : int = api_doc
            with open('''docs/source/en/_toctree.yml''' , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase_ : Tuple = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
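# A self-contained sketch of what the cleaning function above is meant to do
# (restated under a hypothetical name, since the obfuscated identifiers are not
# callable): collapse duplicate `local` keys that share a title, then sort the
# entries alphabetically by title.
def _clean_toc_sketch(model_doc: list) -> list:
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    new_doc = []
    for duplicate_key in [key for key, value in counts.items() if value > 1]:
        titles = {doc["title"] for doc in model_doc if doc["local"] == duplicate_key}
        if len(titles) > 1:
            raise ValueError(f"{duplicate_key} appears with different titles: {titles}")
        new_doc.append({"local": duplicate_key, "title": titles.pop()})
    new_doc.extend(doc for doc in model_doc if counts[doc["local"]] == 1)
    return sorted(new_doc, key=lambda s: s["title"].lower())

assert _clean_toc_sketch(
    [{"local": "model_doc/bert", "title": "BERT"}, {"local": "model_doc/albert", "title": "ALBERT"}]
)[0]["title"] == "ALBERT"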
| 318
|
"""simple docstring"""
from itertools import permutations
def _A (__a ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
SCREAMING_SNAKE_CASE_ : List[str] = [7, 11, 13, 17]
    for i, test in enumerate([7, 11, 13, 17] ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def _A (__a = 10 ) -> int:
"""simple docstring"""
    return sum(
        int(''''''.join(map(str , num ) ) )
        for num in permutations(range(__a ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
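# A worked check of the divisibility chain above on the pandigital number
# 1406357289 from the problem statement: d4 = 6 is even, d3+d4+d5 = 0+6+3 is
# divisible by 3, d6 = 5 is divisible by 5, and 357, 572, 728, 289 are
# divisible by 7, 11, 13, 17 respectively.
_digits = tuple(int(c) for c in "1406357289")
assert _digits[3] % 2 == 0 and sum(_digits[2:5]) % 3 == 0 and _digits[5] % 5 == 0
for _i, _test in enumerate([7, 11, 13, 17]):
    assert (_digits[_i + 4] * 100 + _digits[_i + 5] * 10 + _digits[_i + 6]) % _test == 0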
| 318
| 1
|
"""simple docstring"""
from collections.abc import Callable
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : Callable | None = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : list = []
        # Stores the index of each item to support updates and deletion.
SCREAMING_SNAKE_CASE_ : dict = {}
# Stores current size of heap.
SCREAMING_SNAKE_CASE_ : int = 0
        # Stores the function used to compute an item's score, on which the
        # ordering is based (defaults to the identity function).
        SCREAMING_SNAKE_CASE_ : Any = key or (lambda x: x)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : int):
'''simple docstring'''
return int((i - 1) / 2) if i > 0 else None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = int(2 * i + 1)
return left if 0 < left < self.size else None
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = int(2 * i + 2)
return right if 0 < right < self.size else None
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : int , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.arr[j], self.arr[i]
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : int , lowercase_ : int):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self._left(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self._right(lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = i
if left is not None and not self._cmp(lowercase_ , lowercase_):
SCREAMING_SNAKE_CASE_ : str = left
if right is not None and not self._cmp(lowercase_ , lowercase_):
SCREAMING_SNAKE_CASE_ : Optional[int] = right
return valid_parent
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self._parent(lowercase_)
while parent is not None and not self._cmp(lowercase_ , lowercase_):
self._swap(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = parent, self._parent(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self._get_valid_parent(lowercase_)
while valid_parent != index:
self._swap(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = valid_parent, self._get_valid_parent(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : int , lowercase_ : int):
'''simple docstring'''
if item not in self.pos_map:
return
SCREAMING_SNAKE_CASE_ : Dict = self.pos_map[item]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [item, self.key(lowercase_)]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(lowercase_)
self._heapify_down(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : int):
'''simple docstring'''
if item not in self.pos_map:
return
SCREAMING_SNAKE_CASE_ : Any = self.pos_map[item]
del self.pos_map[item]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.arr[self.size - 1]
SCREAMING_SNAKE_CASE_ : List[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(lowercase_)
self._heapify_down(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : int , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = len(self.arr)
if arr_len == self.size:
self.arr.append([item, self.key(lowercase_)])
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [item, self.key(lowercase_)]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.size
self.size += 1
self._heapify_up(self.size - 1)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return self.arr[0] if self.size else None
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0])
return top_item_tuple
def _A () -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
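# The class above is a binary heap that also keeps a position map (pos_map) so
# items can be updated or deleted in O(log n). Its obfuscated method names are
# not callable, so here is a minimal sketch of the intended key-based ordering
# using the standard library: scores are stored alongside items, with the
# smallest score on top.
import heapq

_key = lambda item: -item  # a max-heap over ints, expressed as a negated score
_pairs = [(_key(item), item) for item in [3, 1, 4, 1, 5]]
heapq.heapify(_pairs)
assert heapq.heappop(_pairs)[1] == 5  # the largest item has the smallest score
assert heapq.heappop(_pairs)[1] == 4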
| 318
|
"""simple docstring"""
UpperCAmelCase_ : List[Any] = 9.8_0_6_6_5
def _A (__a , __a , __a = g ) -> float:
"""simple docstring"""
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
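# A worked example of Archimedes' principle as computed above
# (force = fluid_density * gravity * volume): 0.1 m^3 submerged in fresh water
# (about 1000 kg/m^3) under standard gravity experiences an upward buoyant
# force of roughly 1000 * 9.80665 * 0.1 = 980.665 N.
assert abs(1000 * 9.80665 * 0.1 - 980.665) < 1e-9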
| 318
| 1
|
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
UpperCAmelCase_ : Any = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def _A () -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Github(os.environ['''GITHUB_TOKEN'''] )
SCREAMING_SNAKE_CASE_ : Optional[int] = g.get_repo('''huggingface/transformers''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = repo.get_issues(state='''open''' )
for issue in open_issues:
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
SCREAMING_SNAKE_CASE_ : Tuple = comments[0] if len(__a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 318
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase_ : Union[str, Any] = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, UpperCAmelCase_)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__a )
def _A (__a ) -> Any:
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE_ : Optional[Any] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__a , id=__a )
| 318
| 1
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _A (__a ) -> List[str]:
"""simple docstring"""
    SCREAMING_SNAKE_CASE_ : List[str] = filter(lambda p : p.requires_grad , model.parameters() )
SCREAMING_SNAKE_CASE_ : int = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCAmelCase_ : Optional[Any] = logging.getLogger(__name__)
def _A (__a , __a ) -> Tuple:
"""simple docstring"""
if metric == "rouge2":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
SCREAMING_SNAKE_CASE_ : Tuple = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
SCREAMING_SNAKE_CASE_ : List[Any] = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
''' function.''' )
SCREAMING_SNAKE_CASE_ : int = ModelCheckpoint(
dirpath=__a , filename=__a , monitor=f'val_{metric}' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def _A (__a , __a ) -> List[str]:
"""simple docstring"""
return EarlyStopping(
monitor=f'val_{metric}' , mode='''min''' if '''loss''' in metric else '''max''' , patience=__a , verbose=__a , )
class lowerCAmelCase__ ( pl.Callback ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = {F'lr_group_{i}': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(lowercase_)
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : pl.Trainer , lowercase_ : pl.LightningModule , lowercase_ : str , lowercase_ : int=True):
'''simple docstring'''
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****')
SCREAMING_SNAKE_CASE_ : Any = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']})
# Log results
SCREAMING_SNAKE_CASE_ : int = Path(pl_module.hparams.output_dir)
if type_path == "test":
SCREAMING_SNAKE_CASE_ : int = od / '''test_results.txt'''
SCREAMING_SNAKE_CASE_ : Tuple = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
SCREAMING_SNAKE_CASE_ : List[Any] = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
SCREAMING_SNAKE_CASE_ : Tuple = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=lowercase_)
generations_file.parent.mkdir(exist_ok=lowercase_)
with open(lowercase_ , '''a+''') as writer:
for key in sorted(lowercase_):
if key in ["log", "progress_bar", "preds"]:
continue
SCREAMING_SNAKE_CASE_ : Union[str, Any] = metrics[key]
if isinstance(lowercase_ , torch.Tensor):
SCREAMING_SNAKE_CASE_ : Optional[Any] = val.item()
SCREAMING_SNAKE_CASE_ : int = F'{key}: {val:.6f}\n'
writer.write(lowercase_)
if not save_generations:
return
if "preds" in metrics:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''\n'''.join(metrics['''preds'''])
generations_file.open('''w+''').write(lowercase_)
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any] , lowercase_ : int):
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE_ : Dict = pl_module.model.model.num_parameters()
except AttributeError:
SCREAMING_SNAKE_CASE_ : int = pl_module.model.num_parameters()
SCREAMING_SNAKE_CASE_ : Any = count_trainable_parameters(lowercase_)
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6})
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : pl.Trainer , lowercase_ : pl.LightningModule):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(lowercase_ , lowercase_ , '''test''')
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : pl.Trainer , lowercase_ : List[str]):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 318
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
UpperCAmelCase_ : Any = """examples/"""
UpperCAmelCase_ : Optional[int] = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCAmelCase_ : List[Any] = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCAmelCase_ : Optional[int] = """README.md"""
def _A (__a , __a , __a ) -> int:
"""simple docstring"""
with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE_ : Optional[int] = replace.replace('''VERSION''' , __a )
SCREAMING_SNAKE_CASE_ : Tuple = re_pattern.sub(__a , __a )
with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__a )
def _A (__a ) -> int:
"""simple docstring"""
for folder, directories, fnames in os.walk(__a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__a , __a ) , __a , pattern='''examples''' )
def _A (__a , __a=False ) -> List[str]:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__a , __a , __a )
if not patch:
update_version_in_examples(__a )
def _A () -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''🤗 Transformers currently provides the following architectures'''
SCREAMING_SNAKE_CASE_ : Optional[int] = '''1. Want to contribute a new model?'''
with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ : Tuple = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE_ : Tuple = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ : Dict = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
SCREAMING_SNAKE_CASE_ : List[Any] = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__a )
def _A () -> List[str]:
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.read()
SCREAMING_SNAKE_CASE_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def _A (__a=False ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE_ : List[Any] = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE_ : int = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
SCREAMING_SNAKE_CASE_ : Any = f'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE_ : int = input(f'Which version are you releasing? [{default_version}]' )
if len(__a ) == 0:
SCREAMING_SNAKE_CASE_ : Optional[Any] = default_version
print(f'Updating version to {version}.' )
global_version_update(__a , patch=__a )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def _A () -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = get_version()
SCREAMING_SNAKE_CASE_ : Any = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE_ : int = input(f'Which version are we developing now? [{dev_version}]' )
if len(__a ) == 0:
SCREAMING_SNAKE_CASE_ : Optional[int] = dev_version
print(f'Updating version to {version}.' )
global_version_update(__a )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
UpperCAmelCase_ : int = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
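# A quick, self-contained illustration of how the replacement patterns above
# work: the "init" regex both extracts the current version and rewrites the
# __version__ line in place (the version strings here are illustrative).
_re_init = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
_src = '__version__ = "4.26.0.dev0"\n'
assert _re_init.search(_src).groups()[0] == "4.26.0.dev0"
assert _re_init.sub('__version__ = "4.26.0"\n', _src) == '__version__ = "4.26.0"\n'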
| 318
| 1
|
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = input("""Enter image url: """).strip()
print(f'''Downloading image from {url} ...''')
UpperCAmelCase_ : List[Any] = BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
UpperCAmelCase_ : List[str] = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
UpperCAmelCase_ : Dict = requests.get(image_url).content
UpperCAmelCase_ : List[Any] = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''')
| 318
|
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def _A (__a , __a , __a=1e-12 ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T
return jnp.matmul(__a , norm_emb_a.T )
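# A self-contained sketch of the helper above (restated under a hypothetical
# name, because the obfuscated body reuses one argument for both inputs):
# normalise each embedding matrix row-wise to unit norm, then take a dot
# product to obtain pairwise cosine similarities.
def _cosine_similarity_sketch(emb_1, emb_2, eps=1e-12):
    norm_1 = emb_1 / jnp.clip(jnp.linalg.norm(emb_1, axis=1, keepdims=True), a_min=eps)
    norm_2 = emb_2 / jnp.clip(jnp.linalg.norm(emb_2, axis=1, keepdims=True), a_min=eps)
    return jnp.matmul(norm_1, norm_2.T)

_demo = jnp.array([[1.0, 0.0], [0.0, 2.0]])
# parallel rows have similarity 1, orthogonal rows similarity 0
assert jnp.allclose(_cosine_similarity_sketch(_demo, _demo), jnp.eye(2), atol=1e-6)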
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = jnp.floataa
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxCLIPVisionModule(self.config.vision_config)
SCREAMING_SNAKE_CASE_ : Tuple = nn.Dense(self.config.projection_dim , use_bias=lowercase_ , dtype=self.dtype)
SCREAMING_SNAKE_CASE_ : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim))
SCREAMING_SNAKE_CASE_ : Dict = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,))
def __call__( self : Optional[Any] , lowercase_ : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.vision_model(lowercase_)[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.visual_projection(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.special_care_embeds)
SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
SCREAMING_SNAKE_CASE_ : Tuple = 0.0
SCREAMING_SNAKE_CASE_ : Dict = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.round(lowercase_ , 3)
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=lowercase_)
# Use a lower threshold if an image has any special care concept
SCREAMING_SNAKE_CASE_ : Dict = is_special_care * 0.01
SCREAMING_SNAKE_CASE_ : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
SCREAMING_SNAKE_CASE_ : Any = jnp.round(lowercase_ , 3)
SCREAMING_SNAKE_CASE_ : Dict = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = CLIPConfig
__UpperCamelCase = "clip_input"
__UpperCamelCase = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Union[str, Any] , lowercase_ : CLIPConfig , lowercase_ : Optional[Tuple] = None , lowercase_ : int = 0 , lowercase_ : jnp.dtype = jnp.floataa , lowercase_ : bool = True , **lowercase_ : Any , ):
'''simple docstring'''
if input_shape is None:
SCREAMING_SNAKE_CASE_ : List[str] = (1, 224, 224, 3)
SCREAMING_SNAKE_CASE_ : List[Any] = self.module_class(config=lowercase_ , dtype=lowercase_ , **lowercase_)
super().__init__(lowercase_ , lowercase_ , input_shape=lowercase_ , seed=lowercase_ , dtype=lowercase_ , _do_init=_do_init)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : jax.random.KeyArray , lowercase_ : Tuple , lowercase_ : FrozenDict = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = jax.random.normal(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.split(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = {'''params''': params_rng, '''dropout''': dropout_rng}
SCREAMING_SNAKE_CASE_ : List[Any] = self.module.init(lowercase_ , lowercase_)['''params''']
return random_params
def __call__( self : List[Any] , lowercase_ : List[str] , lowercase_ : dict = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = jnp.transpose(lowercase_ , (0, 2, 3, 1))
return self.module.apply(
{'''params''': params or self.params} , jnp.array(lowercase_ , dtype=jnp.floataa) , rngs={} , )
| 318
| 1
|
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : List[str]=None , lowercase_ : Tuple=None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = start
SCREAMING_SNAKE_CASE_ : int = end
SCREAMING_SNAKE_CASE_ : Any = val
SCREAMING_SNAKE_CASE_ : List[Any] = (start + end) // 2
SCREAMING_SNAKE_CASE_ : Dict = left
SCREAMING_SNAKE_CASE_ : List[str] = right
def __repr__( self : Union[str, Any]):
'''simple docstring'''
return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : Sequence , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = collection
SCREAMING_SNAKE_CASE_ : Any = function
if self.collection:
SCREAMING_SNAKE_CASE_ : List[Any] = self._build_tree(0 , len(lowercase_) - 1)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : List[str] , lowercase_ : Optional[int]):
'''simple docstring'''
self._update_tree(self.root , lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Any , lowercase_ : List[Any]):
'''simple docstring'''
return self._query_range(self.root , lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Any , lowercase_ : Any):
'''simple docstring'''
if start == end:
return SegmentTreeNode(lowercase_ , lowercase_ , self.collection[start])
SCREAMING_SNAKE_CASE_ : Optional[int] = (start + end) // 2
SCREAMING_SNAKE_CASE_ : Tuple = self._build_tree(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : str = self._build_tree(mid + 1 , lowercase_)
return SegmentTreeNode(lowercase_ , lowercase_ , self.fn(left.val , right.val) , lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any]):
'''simple docstring'''
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ : Dict = val
return
if i <= node.mid:
self._update_tree(node.left , lowercase_ , lowercase_)
else:
self._update_tree(node.right , lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.fn(node.left.val , node.right.val)
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , lowercase_ , lowercase_)
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , lowercase_ , node.mid) , self._query_range(node.right , node.mid + 1 , lowercase_) , )
else:
# range in right child tree
return self._query_range(node.right , lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
if self.root is not None:
SCREAMING_SNAKE_CASE_ : Dict = Queue()
queue.put(self.root)
while not queue.empty():
SCREAMING_SNAKE_CASE_ : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left)
if node.right is not None:
queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
UpperCAmelCase_ : Any = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 318
|
"""simple docstring"""
from __future__ import annotations
import queue
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = data
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : Dict = None
def _A () -> TreeNode:
"""simple docstring"""
print('''\n********Press N to stop entering at any point of time********\n''' )
SCREAMING_SNAKE_CASE_ : List[Any] = input('''Enter the value of the root node: ''' ).strip().lower()
SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TreeNode(int(__a ) )
q.put(__a )
while not q.empty():
SCREAMING_SNAKE_CASE_ : Optional[int] = q.get()
SCREAMING_SNAKE_CASE_ : List[str] = f'Enter the left node of {node_found.data}: '
SCREAMING_SNAKE_CASE_ : Optional[int] = input(__a ).strip().lower() or '''n'''
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE_ : List[str] = TreeNode(int(__a ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = left_node
q.put(__a )
SCREAMING_SNAKE_CASE_ : str = f'Enter the right node of {node_found.data}: '
SCREAMING_SNAKE_CASE_ : str = input(__a ).strip().lower() or '''n'''
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE_ : Any = TreeNode(int(__a ) )
SCREAMING_SNAKE_CASE_ : int = right_node
q.put(__a )
raise
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
print(node.data , end=''',''' )
pre_order(node.left )
pre_order(node.right )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
in_order(node.left )
print(node.data , end=''',''' )
in_order(node.right )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=''',''' )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue()
q.put(__a )
while not q.empty():
SCREAMING_SNAKE_CASE_ : Tuple = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue()
q.put(__a )
while not q.empty():
SCREAMING_SNAKE_CASE_ : str = []
while not q.empty():
SCREAMING_SNAKE_CASE_ : List[str] = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__a )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ : list[TreeNode] = []
SCREAMING_SNAKE_CASE_ : Union[str, Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=''',''' )
stack.append(__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = n.left
# end of while means current node doesn't have left child
SCREAMING_SNAKE_CASE_ : Tuple = stack.pop()
# start to traverse its right child
SCREAMING_SNAKE_CASE_ : str = n.right
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ : list[TreeNode] = []
SCREAMING_SNAKE_CASE_ : Any = node
while n or stack:
while n:
stack.append(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.left
SCREAMING_SNAKE_CASE_ : Any = stack.pop()
print(n.data , end=''',''' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.right
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = [], []
SCREAMING_SNAKE_CASE_ : List[Any] = node
stacka.append(__a )
while stacka: # to find the reversed order of post order, store it in stack2
SCREAMING_SNAKE_CASE_ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__a )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=''',''' )
def _A (__a = "" , __a=50 , __a="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(width - len(__a ) - 2 , 2 )
return f'{left * char} {s} {(left + extra) * char}'
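# A self-contained restatement of the banner helper directly above (under a
# hypothetical name, since the obfuscated duplicate parameters are not usable):
# centre a label inside a fixed-width rule of `char` characters.
def _prompt_sketch(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"

assert _prompt_sketch("Demo", 20) == "******* Demo *******"
assert len(_prompt_sketch("Demo", 21)) == 21  # the odd leftover char goes on the right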
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
UpperCAmelCase_ : TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 318
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Dict = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
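# A minimal sketch of the lazy-import pattern this file relies on (class and
# method names here are illustrative, not the real transformers._LazyModule):
# the entry in sys.modules is replaced by a proxy module that only imports a
# submodule the first time one of its attributes is accessed.
import importlib
import types

class _LazyProxySketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._module_for = {symbol: module for module, symbols in import_structure.items() for symbol in symbols}

    def __getattr__(self, attr: str):
        submodule = importlib.import_module("." + self._module_for[attr], self.__name__)
        return getattr(submodule, attr)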
| 318
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = "ssube/stable-diffusion-x4-upscaler-onnx"
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any]=0):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_))
SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Tuple = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Tuple = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : int = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE_ : Optional[int] = False
return options
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128))
# using the PNDM scheduler by default
SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : int = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128))
SCREAMING_SNAKE_CASE_ : Tuple = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''')
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : int = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : int = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : List[str] = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 318
| 1
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
UpperCAmelCase_ : Any = """examples/"""
UpperCAmelCase_ : Optional[int] = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCAmelCase_ : List[Any] = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCAmelCase_ : Optional[int] = """README.md"""
def _A (__a , __a , __a ) -> int:
"""simple docstring"""
with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE_ : Optional[int] = replace.replace('''VERSION''' , __a )
SCREAMING_SNAKE_CASE_ : Tuple = re_pattern.sub(__a , __a )
with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__a )
def _A (__a ) -> int:
"""simple docstring"""
for folder, directories, fnames in os.walk(__a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__a , __a ) , __a , pattern='''examples''' )
def _A (__a , __a=False ) -> List[str]:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__a , __a , __a )
if not patch:
update_version_in_examples(__a )
def _A () -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''🤗 Transformers currently provides the following architectures'''
SCREAMING_SNAKE_CASE_ : Optional[int] = '''1. Want to contribute a new model?'''
with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ : Tuple = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE_ : Tuple = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ : Dict = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
SCREAMING_SNAKE_CASE_ : List[Any] = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__a )
def _A () -> List[str]:
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.read()
SCREAMING_SNAKE_CASE_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def _A (__a=False ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE_ : List[Any] = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE_ : int = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
SCREAMING_SNAKE_CASE_ : Any = f'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE_ : int = input(f'Which version are you releasing? [{default_version}]' )
if len(__a ) == 0:
SCREAMING_SNAKE_CASE_ : Optional[Any] = default_version
print(f'Updating version to {version}.' )
global_version_update(__a , patch=__a )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def _A () -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = get_version()
SCREAMING_SNAKE_CASE_ : Any = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE_ : int = input(f'Which version are we developing now? [{dev_version}]' )
if len(__a ) == 0:
SCREAMING_SNAKE_CASE_ : Optional[int] = dev_version
print(f'Updating version to {version}.' )
global_version_update(__a )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
UpperCAmelCase_ : int = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
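# A minimal, self-contained sketch of what one REPLACE_PATTERNS entry does
# (hypothetical file content; the real script reads and rewrites whole files):
#
#   import re
#   re_pattern = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
#   content = '__version__ = "4.30.0.dev0"\n'
#   print(re_pattern.sub('__version__ = "4.30.0"\n', content))
#   # -> __version__ = "4.30.0"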
| 318
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
UpperCAmelCase_ : List[Any] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
UpperCAmelCase_ : Optional[int] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
    Example 1: A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
    Example 2: The same as Example 1, but also returning the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
UpperCAmelCase_ : Tuple = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=False):
'''simple docstring'''
if return_pvalue:
SCREAMING_SNAKE_CASE_ : int = pearsonr(lowercase_ , lowercase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowercase_ , lowercase_)[0])}
| 318
| 1
|
"""simple docstring"""
def and_gate(input_a: int , input_b: int ) -> int:
    """Return 1 only if both inputs are 1, emulating a logical AND gate."""
    return int((input_a, input_b).count(0 ) == 0 )
def test_and_gate() -> None:
    """Exhaustively check the two-input truth table."""
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
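# The tuple-count idiom above generalizes to any number of inputs; a small
# hypothetical extension (not part of the original module):
#
#   def n_input_and_gate(*inputs: int) -> int:
#       return int(inputs.count(0) == 0)
#
#   # n_input_and_gate(1, 1, 1) -> 1 ; n_input_and_gate(1, 0, 1) -> 0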
| 318
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : Dict[str, int] , lowercase_ : List[str] , lowercase_ : int = None , lowercase_ : int = None):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : str = pad_token_id
SCREAMING_SNAKE_CASE_ : Optional[int] = max_length
SCREAMING_SNAKE_CASE_ : Dict = vocab
SCREAMING_SNAKE_CASE_ : Dict = merges
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BytePairTokenizer(lowercase_ , lowercase_ , sequence_length=lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : GPTaTokenizer , *lowercase_ : Optional[Any] , **lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = [''' '''.join(lowercase_) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE_ : str = tokenizer.get_vocab()
return cls(lowercase_ , lowercase_ , *lowercase_ , **lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : Union[str, os.PathLike] , *lowercase_ : List[str] , **lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ , *lowercase_ , **lowercase_)
return cls.from_tokenizer(lowercase_ , *lowercase_ , **lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : List[Any]):
'''simple docstring'''
return cls(**lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any] , lowercase_ : int = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.tf_tokenizer(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.ones_like(lowercase_)
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = pad_model_inputs(
lowercase_ , max_seq_length=lowercase_ , pad_value=self.pad_token_id)
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318
| 1
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ""
__UpperCamelCase = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self : Union[str, Any] , lowercase_ : Optional[DatasetInfo] = None , lowercase_ : Optional[str] = None , **lowercase_ : Optional[Any] , ):
'''simple docstring'''
super().__init__(self , **lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = repo_info
SCREAMING_SNAKE_CASE_ : List[str] = token
SCREAMING_SNAKE_CASE_ : List[Any] = None
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
if self.dir_cache is None:
SCREAMING_SNAKE_CASE_ : Dict = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
SCREAMING_SNAKE_CASE_ : List[str] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(lowercase_): {'''name''': str(lowercase_), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
})
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : str = "rb" , **lowercase_ : Union[str, Any] , ):
'''simple docstring'''
if not isinstance(self.repo_info , lowercase_):
raise NotImplementedError(F'Open is only implemented for dataset repositories, but got {self.repo_info}')
SCREAMING_SNAKE_CASE_ : Optional[int] = hf_hub_url(self.repo_info.id , lowercase_ , revision=self.repo_info.sha)
return fsspec.open(
lowercase_ , mode=lowercase_ , headers=get_authentication_headers_for_url(lowercase_ , use_auth_token=self.token) , client_kwargs={'''trust_env''': True} , ).open()
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Dict , **lowercase_ : Any):
'''simple docstring'''
self._get_dirs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._strip_protocol(lowercase_)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]=False , **lowercase_ : Optional[Any]):
'''simple docstring'''
self._get_dirs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PurePosixPath(path.strip('''/'''))
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
for p, f in self.dir_cache.items():
SCREAMING_SNAKE_CASE_ : str = PurePosixPath(p.strip('''/'''))
SCREAMING_SNAKE_CASE_ : Any = p.parent
if root == path:
SCREAMING_SNAKE_CASE_ : int = f
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(paths.values())
if detail:
return out
else:
return sorted(f['''name'''] for f in out)
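# A usage sketch for the filesystem above (hypothetical repo; requires a
# DatasetInfo object, e.g. from HfApi().dataset_info(...)):
#
#   fs = <the class above>(repo_info=info, token=None)
#   fs.ls("")                          # entries built from repo_info.siblings
#   with fs.open("train.csv") as f:    # streamed via hf_hub_url
#       head = f.read(100)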
| 318
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
SCREAMING_SNAKE_CASE_ : Dict = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , lowercase_)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowercase_ : str):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowercase_ : List[Any]):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : str):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
SCREAMING_SNAKE_CASE_ : Dict = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
processor_slow.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
processor_fast.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , lowercase_)
self.assertIsInstance(processor_fast.tokenizer , lowercase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , lowercase_)
self.assertIsInstance(processor_fast.image_processor , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''')
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor(do_normalize=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=lowercase_)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Any = image_processor(lowercase_ , return_tensors='''np''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowercase_ , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : int = processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.batch_decode(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Dict = processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 318
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : str , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : Optional[Any] = 13
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 7
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : str = 99
SCREAMING_SNAKE_CASE_ : Any = 32
SCREAMING_SNAKE_CASE_ : Dict = 2
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : List[str] = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''gelu'''
SCREAMING_SNAKE_CASE_ : int = 0.1
SCREAMING_SNAKE_CASE_ : List[str] = 0.1
SCREAMING_SNAKE_CASE_ : Any = 512
SCREAMING_SNAKE_CASE_ : Optional[Any] = 16
SCREAMING_SNAKE_CASE_ : Dict = 2
SCREAMING_SNAKE_CASE_ : List[str] = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : str = 4
SCREAMING_SNAKE_CASE_ : int = None
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE_ : Dict = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = TFDistilBertModel(config=lowercase_)
SCREAMING_SNAKE_CASE_ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_)
SCREAMING_SNAKE_CASE_ : int = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : int = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = TFDistilBertForMaskedLM(config=lowercase_)
SCREAMING_SNAKE_CASE_ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
SCREAMING_SNAKE_CASE_ : Any = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = TFDistilBertForQuestionAnswering(config=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
SCREAMING_SNAKE_CASE_ : List[str] = model(lowercase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self.num_labels
SCREAMING_SNAKE_CASE_ : Tuple = TFDistilBertForSequenceClassification(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.num_choices
SCREAMING_SNAKE_CASE_ : str = TFDistilBertForMultipleChoice(lowercase_)
SCREAMING_SNAKE_CASE_ : int = tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
SCREAMING_SNAKE_CASE_ : int = tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
SCREAMING_SNAKE_CASE_ : Tuple = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
SCREAMING_SNAKE_CASE_ : int = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Any , lowercase_ : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = TFDistilBertForTokenClassification(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__UpperCamelCase = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = TFDistilBertModelTester(self)
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self , config_class=lowercase_ , dim=37)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase_)
@slow
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
SCREAMING_SNAKE_CASE_ : Optional[Any] = TFDistilBertModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]])
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_)[0]
SCREAMING_SNAKE_CASE_ : Tuple = [1, 6, 768]
self.assertEqual(output.shape , lowercase_)
SCREAMING_SNAKE_CASE_ : str = tf.constant(
[
[
[0.19_26_18_85, -0.13_73_29_55, 0.4_11_97_99],
[0.22_15_01_56, -0.07_42_26_61, 0.39_03_72_04],
[0.22_75_60_18, -0.0_89_64_14, 0.3_70_14_67],
]
])
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-4)
| 318
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "rwkv"
__UpperCamelCase = {"max_position_embeddings": "context_length"}
def __init__( self : Union[str, Any] , lowercase_ : Any=50277 , lowercase_ : str=1024 , lowercase_ : List[str]=4096 , lowercase_ : Optional[Any]=32 , lowercase_ : Any=None , lowercase_ : Any=None , lowercase_ : List[Any]=1e-5 , lowercase_ : Union[str, Any]=0 , lowercase_ : Union[str, Any]=0 , lowercase_ : int=6 , lowercase_ : Tuple=False , lowercase_ : Any=True , **lowercase_ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Any = context_length
SCREAMING_SNAKE_CASE_ : int = hidden_size
SCREAMING_SNAKE_CASE_ : int = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size
SCREAMING_SNAKE_CASE_ : int = intermediate_size if intermediate_size is not None else 4 * hidden_size
SCREAMING_SNAKE_CASE_ : int = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_every
SCREAMING_SNAKE_CASE_ : Dict = use_cache
SCREAMING_SNAKE_CASE_ : Dict = bos_token_id
SCREAMING_SNAKE_CASE_ : Any = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
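# A construction sketch (values mirror the __init__ defaults above; the
# attribute_map means `max_position_embeddings` is an alias for
# `context_length`):
#
#   config = <the config class above>(vocab_size=50277, context_length=1024,
#                                     hidden_size=4096, num_hidden_layers=32)
#   # config.max_position_embeddings == config.context_length == 1024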
| 318
| 1
|
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float] , x: float ) -> float:
    """Evaluate sum(c * x**i) for coefficients c in ascending-power order."""
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner(poly: Sequence[float] , x: float ) -> float:
    """Evaluate the same polynomial with Horner's rule."""
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
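# Worked check for the sample inputs above:
# 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 500 + 9300 + 70000 = 79800.0,
# and both functions print that value. Horner's rule gets there with n
# multiplications and n additions instead of repeated exponentiation.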
| 318
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)
def rms_speed_of_molecule(temperature: float , molar_mass: float ) -> float:
    """Return the RMS speed in m/s of a gas molecule: vrms = sqrt(3RT/M)."""
    if temperature < 0:
        raise Exception('''Temperature cannot be less than 0 K''' )
    if molar_mass <= 0:
        raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
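# Worked check: for N2 at 300 K with a molar mass of 0.028 kg/mol,
# vrms = (3 * 8.3144598 * 300 / 0.028) ** 0.5 ≈ 517 m/s.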
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300  # K
    molar_mass = 0.028  # kg/mol for N2; the formula expects SI units
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 318
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Dict = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
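# How the lazy structure above behaves (descriptive note): _LazyModule defers
# each submodule import until one of the names listed in _import_structure is
# first accessed, so e.g.
#
#   from transformers import XGLMConfig
#
# only imports .configuration_xglm at that point, keeping package import fast
# even when optional backends (torch, flax, tf) are missing.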
| 318
|
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies
def __iter__( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''')
for task in range(self.n_tasks):
for _ in range(self.n_copies):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = start_length
SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
SCREAMING_SNAKE_CASE_ : Tuple = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
return all(lowercase_)
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = re.split('''(%s)''' % '''|'''.join(__a ) , __a )
# last string should be ""
return "".join(string_list[:-2] )
def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__a ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1]
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes(
__a , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__a , __a ):
gen_token_dict[task].append(__a )
SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
code_gens[task].append(remove_last_block(__a ) )
return code_gens
def _A () -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a )
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE_ : str = '''false'''
if args.num_workers is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
set_seed(args.seed , device_specific=__a )
# Load model and tokenizer
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE_ : List[str] = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' )
SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' )
SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a )
    # note: args.batch_size is actually num_return_sequences per prompt, not the dataloader batch size
SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a )
SCREAMING_SNAKE_CASE_ : List[Any] = complete_code(
__a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE_ : int = []
for task in tqdm(range(__a ) ):
SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test''']
SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})'
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute(
references=__a , predictions=__a , num_workers=args.num_workers )
print(f'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__a , __a )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
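# A typical launch (illustrative; the flag names mirror the args.* attributes
# read above, which come from HumanEvalArguments in the local arguments module):
#
#   accelerate launch human_eval.py --model_ckpt <checkpoint> --do_sample \
#       --num_tasks 16 --batch_size 10 --HF_ALLOW_CODE_EVAL 1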
| 318
| 1
|
"""simple docstring"""
import numpy
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : numpy.ndarray , lowercase_ : numpy.ndarray):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = input_array
        # Random initial weights are assigned, where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer;
        # the first hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE_ : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4)
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = numpy.random.rand(
4 , 3)
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE_ : List[Any] = numpy.random.rand(3 , 1)
# Real output values provided.
SCREAMING_SNAKE_CASE_ : Dict = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE_ : Dict = numpy.zeros(output_array.shape)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights))
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ))
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ))
return self.layer_between_second_hidden_layer_and_output
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , )
SCREAMING_SNAKE_CASE_ : Tuple = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer) , )
SCREAMING_SNAKE_CASE_ : Optional[int] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : numpy.ndarray , lowercase_ : int , lowercase_ : bool):
'''simple docstring'''
for iteration in range(1 , iterations + 1):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE_ : str = numpy.mean(numpy.square(output - self.feedforward()))
print(F'Iteration {iteration} Loss: {loss}')
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : numpy.ndarray):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = input_arr
SCREAMING_SNAKE_CASE_ : Any = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ))
SCREAMING_SNAKE_CASE_ : Any = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ))
return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def _A (__a ) -> numpy.ndarray:
"""simple docstring"""
return 1 / (1 + numpy.exp(-value ))
def _A (__a ) -> numpy.ndarray:
"""simple docstring"""
return (value) * (1 - (value))
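# Note: sigmoid_derivative expects an already-activated value a = sigmoid(x)
# and uses the identity sigmoid'(x) = a * (1 - a); that is why the
# backpropagation above passes layer outputs rather than pre-activations.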
def _A () -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE_ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=__a , output_array=__a )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=__a , iterations=10 , give_loss=__a )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 318
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["image_processor", "feature_extractor"]
__UpperCamelCase = "TvltImageProcessor"
__UpperCamelCase = "TvltFeatureExtractor"
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]):
'''simple docstring'''
super().__init__(image_processor=lowercase_ , feature_extractor=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor
SCREAMING_SNAKE_CASE_ : Optional[Any] = feature_extractor
def __call__( self : Any , lowercase_ : str=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : str=None , lowercase_ : int=False , lowercase_ : Union[str, Any]=False , *lowercase_ : List[Any] , **lowercase_ : List[str] , ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''')
SCREAMING_SNAKE_CASE_ : Any = None
if images is not None:
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor(lowercase_ , mask_pixel=lowercase_ , *lowercase_ , **lowercase_)
if images_mixed is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor(lowercase_ , is_mixed=lowercase_ , *lowercase_ , **lowercase_)
if audio is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(
lowercase_ , *lowercase_ , sampling_rate=lowercase_ , mask_audio=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = {}
if audio is not None:
output_dict.update(lowercase_)
if images is not None:
output_dict.update(lowercase_)
if images_mixed_dict is not None:
output_dict.update(lowercase_)
return output_dict
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor.model_input_names
SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 318
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "timm_backbone"
def __init__( self : Tuple , lowercase_ : Optional[int]=None , lowercase_ : str=3 , lowercase_ : Union[str, Any]=True , lowercase_ : Dict=True , lowercase_ : Dict=None , **lowercase_ : Dict , ):
'''simple docstring'''
super().__init__(**lowercase_)
SCREAMING_SNAKE_CASE_ : int = backbone
SCREAMING_SNAKE_CASE_ : List[Any] = num_channels
SCREAMING_SNAKE_CASE_ : Optional[int] = features_only
SCREAMING_SNAKE_CASE_ : int = use_pretrained_backbone
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : List[Any] = out_indices if out_indices is not None else (-1,)
| 318
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    """
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """
        Processes audio and text inputs, as well as audio and text targets. At most one of `audio`/`text` may be
        given as the model input, and at most one of `audio_target`/`text_target` as the label.
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """
        Collates audio inputs, token ids, and their targets into a padded batch. Audio is padded by the feature
        extractor and token ids by the tokenizer.
        """
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily switch the feature extractor to the mel-bin count so the
                # spectrogram labels are padded with the right feature size.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to SpeechT5Tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to SpeechT5Tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
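# Hedged usage sketch (not part of the original module). Assuming a pretrained
# checkpoint such as "microsoft/speecht5_asr" is available, a single call routes
# `audio` to the feature extractor and `text_target` to the tokenizer:
#
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
#   batch = processor(audio=waveform, text_target="the transcription", sampling_rate=16000)
#   # batch contains "input_values", "labels" and, if present, "decoder_attention_mask"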
"""Open the top Google search results for a query given on the command line."""
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """
    Return the maximum parquet row-group size for these features, or None to keep the library default.
    Image, audio, and binary columns get smaller row groups so that random access stays cheap, since
    reading one row requires reading its entire row group.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable (streaming) dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset to Parquet in row groups of `batch_size` rows; return the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
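# Hedged usage sketch (not in the original file): writing a small in-memory
# dataset to Parquet with the writer defined above. `Dataset.from_dict` is the
# public `datasets` API; the row-group size is chosen by get_writer_batch_size().
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   ParquetDatasetWriter(ds, "out.parquet").write()  # returns the number of bytes written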
"""Flax mT5 model, re-using the Flax T5 implementation with an mT5 config."""
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right and prepend the decoder start token."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    # replace any -100 label-masking placeholders with the pad token
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
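# Worked example (a sketch, not in the original file): with pad_token_id=0 and
# decoder_start_token_id=0, shift_tokens_right maps [[5, 6, 7]] to [[0, 5, 6]]:
# every token moves one slot to the right, the first slot receives the decoder
# start token, and any -100 label placeholders would be replaced by the pad id.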
"""Convert MusicGen checkpoints from the original audiocraft repository."""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    """Map an audiocraft parameter name onto the corresponding HF module name."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq MusicGen state dict to HF names, splitting the fused qkv projection
    and separating out the encoder-decoder projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
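# Example invocation (a sketch; the script file name below is an assumption):
#
#   python convert_musicgen_transformers.py \
#       --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu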
"""LiLT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
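# Hedged usage sketch (not part of the original file): instantiating the config
# with defaults and inspecting the LiLT-specific fields.
#
#   config = LiltConfig()
#   config.channel_shrink_ratio         # 4
#   config.max_2d_position_embeddings   # 1024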
"""Morphological dilation of a binary image with a structuring element."""
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a grayscale image from an RGB image using the luminosity weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary image by thresholding the grayscale image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the dilated image: a pixel is set if the kernel overlaps any set pixel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
"""Check whether two strings are anagrams of each other."""
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made of the same letters arranged
    differently (ignoring case and whitespace).
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each position, increment the count for the character in the first
    # string and decrement it for the character in the second string
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
"""Rotate a square matrix by 90, 180, or 270 degrees counterclockwise."""
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Make a square matrix filled with increasing integers."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
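# Worked example (not in the original file): rotate_90 turns the default matrix
#   1  2  3  4             4  8 12 16
#   5  6  7  8    into     3  7 11 15
#   9 10 11 12             2  6 10 14
#  13 14 15 16             1  5  9 13
# i.e. a 90 degree counterclockwise rotation.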
"""Check that the model section of the documentation table of contents is deduplicated and sorted."""
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Clean the model documentation table of contents by removing duplicates and sorting models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
"""Count binary trees and binary search trees for a given number of nodes."""


def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees for a given number of nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of possible labelled binary trees for a given number of nodes."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
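# Worked example (not in the original file): for node_count = 3,
#   catalan_number(3)    = C(6, 3) // 4 = 20 // 4 = 5   (distinct BST shapes)
#   binary_tree_count(3) = 5 * 3! = 30                  (distinct labelled binary trees)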
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
"""Testing suite for the PyTorch LLaMA model."""
import unittest

from parameterized import parameterized

from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer


class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
"""Solve a 9x9 Sudoku grid with backtracking."""
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if ``n`` can be placed at (row, column) without clashing
    with its row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Fill the grid in place by backtracking; return the solved grid or None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
"""Sigmoid and sigmoid linear unit (SiLU / swish) activation functions."""
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the sigmoid (logistic) activation, 1 / (1 + e^-x), applied elementwise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Return the sigmoid linear unit (SiLU, also called swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
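# Worked examples (not in the original file):
#   sigmoid(np.array([0.0]))             -> [0.5]
#   sigmoid_linear_unit(np.array([0.0])) -> [0.0]
#   sigmoid_linear_unit(np.array([2.0])) -> [2 * sigmoid(2)] ~= [1.7616]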
"""Project Euler problem 43: sum all 0-9 pandigital numbers with the substring divisibility property."""
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check whether the three-digit substrings of ``num`` satisfy the divisibility rules."""
    # d2 d3 d4 divisible by 2 means d4 must be even
    if num[3] % 2 != 0:
        return False

    # d3 d4 d5 divisible by 3 means its digit sum is divisible by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    # d4 d5 d6 divisible by 5 means d6 is 0 or 5
    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all pandigital numbers which pass the substring divisibility tests."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
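# Worked example (not in the original file): 1406357289 qualifies, since
# 406 % 2 == 0, 063 % 3 == 0, 635 % 5 == 0, 357 % 7 == 0, 572 % 11 == 0,
# 728 % 13 == 0 and 289 % 17 == 0; solution() sums all such pandigitals.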
"""Maximum sum of non-adjacent elements (the "house robber" recurrence)."""
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from ``nums`` without picking two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]  # best sum that includes the current element
    max_excluding = 0  # best sum that excludes the current element
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
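# Worked example (not in the original file): for nums = [3, 2, 7, 10] the
# (include, exclude) pair evolves (3, 0) -> (2, 3) -> (10, 3) -> (13, 10),
# so the answer is max(13, 10) = 13, i.e. picking 3 and 10.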
"""Calculate the buoyant force on an object submerged in a fluid (Archimedes' principle)."""

# Acceleration due to gravity at the Earth's surface, in m/s^2
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force = fluid density * gravity * displaced volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")

    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
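# Worked example (not in the original file): a 0.5 m^3 object fully submerged
# in fresh water (density ~1000 kg/m^3) experiences
#   archimedes_principle(1000, 0.5) == 1000 * 9.80665 * 0.5 == 4903.325 N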
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
"""Feature extractor class for Whisper: log-mel spectrogram features from raw audio."""
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence (zeros) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array):
        """Compute the log-mel spectrogram of the provided audio, clipped and rescaled."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ):
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ):
        """Featurize one or several sequences of raw audio into padded log-mel input features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
else:
SCREAMING_SNAKE_CASE_ : Any = input_features
if return_attention_mask:
# rescale from padded samples (480000) to feature frames (3000) via the hop length

SCREAMING_SNAKE_CASE_ : Tuple = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = padded_inputs.convert_to_tensors(lowercase_)
return padded_inputs
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = copy.deepcopy(self.__dict__)
SCREAMING_SNAKE_CASE_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
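# Minimal usage sketch, written against the standard WhisperFeatureExtractor
# API that this class mirrors (the class and method names above are obfuscated):
# import numpy as np
# from transformers import WhisperFeatureExtractor
# fe = WhisperFeatureExtractor()                       # 80 mel bins, 16 kHz, 30 s chunks
# audio = np.zeros(16000, dtype=np.float32)            # 1 second of silence
# feats = fe(audio, sampling_rate=16000, return_tensors="np")
# feats["input_features"].shape                        # (1, 80, 3000)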
| 318
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
UpperCAmelCase_ : Any = """examples/"""
UpperCAmelCase_ : Optional[int] = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCAmelCase_ : List[Any] = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCAmelCase_ : Optional[int] = """README.md"""
def _A (__a , __a , __a ) -> int:
"""simple docstring"""
with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE_ : Optional[int] = replace.replace('''VERSION''' , __a )
SCREAMING_SNAKE_CASE_ : Tuple = re_pattern.sub(__a , __a )
with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__a )
def _A (__a ) -> int:
"""simple docstring"""
for folder, directories, fnames in os.walk(__a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__a , __a ) , __a , pattern='''examples''' )
def _A (__a , __a=False ) -> List[str]:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__a , __a , __a )
if not patch:
update_version_in_examples(__a )
def _A () -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''🤗 Transformers currently provides the following architectures'''
SCREAMING_SNAKE_CASE_ : Optional[int] = '''1. Want to contribute a new model?'''
with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ : Tuple = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE_ : Tuple = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ : Dict = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
SCREAMING_SNAKE_CASE_ : List[Any] = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__a )
def _A () -> List[str]:
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.read()
SCREAMING_SNAKE_CASE_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def _A (__a=False ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE_ : List[Any] = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE_ : int = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
SCREAMING_SNAKE_CASE_ : Any = f'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE_ : int = input(f'Which version are you releasing? [{default_version}]' )
if len(__a ) == 0:
SCREAMING_SNAKE_CASE_ : Optional[Any] = default_version
print(f'Updating version to {version}.' )
global_version_update(__a , patch=__a )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def _A () -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = get_version()
SCREAMING_SNAKE_CASE_ : Any = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE_ : int = input(f'Which version are we developing now? [{dev_version}]' )
if len(__a ) == 0:
SCREAMING_SNAKE_CASE_ : Optional[int] = dev_version
print(f'Updating version to {version}.' )
global_version_update(__a )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
UpperCAmelCase_ : int = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
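# Sketch of how the "init" replacement pattern above rewrites a version line
# (the sample version strings are hypothetical):
# import re
# pat = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
# pat.sub('__version__ = "4.31.0"\n', '__version__ = "4.31.0.dev0"\n')
# # -> '__version__ = "4.31.0"\n'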
| 318
| 1
|
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = TransfoXLTokenizer
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = '''<unk> UNwanted , running'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''<unk> unwanted, running'''
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.tokenize('''<unk> UNwanted , running''')
self.assertListEqual(lowercase_ , ['''<unk>''', '''unwanted''', ''',''', '''running'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , [0, 4, 8, 7])
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = TransfoXLTokenizer(lower_case=lowercase_)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''') , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = TransfoXLTokenizer(lower_case=lowercase_)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = TransfoXLTokenizer(lower_case=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
SCREAMING_SNAKE_CASE_ : Tuple = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
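# Note: "@-@", "@,@" and "@.@" are WikiText-style escape tokens for intra-word
# hyphens and digit separators; convert_tokens_to_string (asserted below) folds
# them back into plain "-", "," and ".".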
self.assertListEqual(tokenizer.tokenize(lowercase_) , lowercase_)
self.assertEqual(tokenizer.convert_tokens_to_string(lowercase_) , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = len(lowercase_)
tokenizer.add_tokens(['''new1''', '''new2'''])
tokenizer.move_added_token('''new1''' , 1)
# Check that moved token is not copied (duplicate)
self.assertEqual(len(lowercase_) , original_len + 2)
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''') , [1])
self.assertEqual(tokenizer.decode([1]) , '''new1''')
| 318
|
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def _A (__a , __a , __a=1e-12 ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T
return jnp.matmul(__a , norm_emb_a.T )
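# Numerical note: both embedding matrices are row-normalized before the matmul,
# so the result holds cosine similarities. An equivalent NumPy sketch with toy
# shapes (inputs hypothetical):
# import numpy as np
# a, b = np.random.randn(2, 4), np.random.randn(3, 4)
# a /= np.clip(np.linalg.norm(a, axis=1, keepdims=True), 1e-12, None)
# b /= np.clip(np.linalg.norm(b, axis=1, keepdims=True), 1e-12, None)
# sims = a @ b.T                                       # shape (2, 3), values in [-1, 1]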
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = jnp.floataa
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxCLIPVisionModule(self.config.vision_config)
SCREAMING_SNAKE_CASE_ : Tuple = nn.Dense(self.config.projection_dim , use_bias=lowercase_ , dtype=self.dtype)
SCREAMING_SNAKE_CASE_ : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim))
SCREAMING_SNAKE_CASE_ : Dict = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,))
def __call__( self : Optional[Any] , lowercase_ : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.vision_model(lowercase_)[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.visual_projection(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.special_care_embeds)
SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
SCREAMING_SNAKE_CASE_ : Tuple = 0.0
SCREAMING_SNAKE_CASE_ : Dict = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.round(lowercase_ , 3)
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=lowercase_)
# Use a lower threshold if an image has any special care concept
SCREAMING_SNAKE_CASE_ : Dict = is_special_care * 0.01
SCREAMING_SNAKE_CASE_ : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
SCREAMING_SNAKE_CASE_ : Any = jnp.round(lowercase_ , 3)
SCREAMING_SNAKE_CASE_ : Dict = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = CLIPConfig
__UpperCamelCase = "clip_input"
__UpperCamelCase = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Union[str, Any] , lowercase_ : CLIPConfig , lowercase_ : Optional[Tuple] = None , lowercase_ : int = 0 , lowercase_ : jnp.dtype = jnp.floataa , lowercase_ : bool = True , **lowercase_ : Any , ):
'''simple docstring'''
if input_shape is None:
SCREAMING_SNAKE_CASE_ : List[str] = (1, 224, 224, 3)
SCREAMING_SNAKE_CASE_ : List[Any] = self.module_class(config=lowercase_ , dtype=lowercase_ , **lowercase_)
super().__init__(lowercase_ , lowercase_ , input_shape=lowercase_ , seed=lowercase_ , dtype=lowercase_ , _do_init=_do_init)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : jax.random.KeyArray , lowercase_ : Tuple , lowercase_ : FrozenDict = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = jax.random.normal(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.split(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = {'''params''': params_rng, '''dropout''': dropout_rng}
SCREAMING_SNAKE_CASE_ : List[Any] = self.module.init(lowercase_ , lowercase_)['''params''']
return random_params
def __call__( self : List[Any] , lowercase_ : List[str] , lowercase_ : dict = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = jnp.transpose(lowercase_ , (0, 2, 3, 1))
return self.module.apply(
{'''params''': params or self.params} , jnp.array(lowercase_ , dtype=jnp.floataa) , rngs={} , )
| 318
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "mobilenet_v2"
def __init__( self : Dict , lowercase_ : Any=3 , lowercase_ : Optional[int]=224 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : List[Any]=8 , lowercase_ : Optional[int]=8 , lowercase_ : str=6 , lowercase_ : Optional[int]=32 , lowercase_ : List[str]=True , lowercase_ : Dict=True , lowercase_ : Optional[int]="relu6" , lowercase_ : Any=True , lowercase_ : Any=0.8 , lowercase_ : Any=0.02 , lowercase_ : List[str]=0.0_01 , lowercase_ : Any=255 , **lowercase_ : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase_)
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''')
SCREAMING_SNAKE_CASE_ : List[Any] = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Dict = depth_multiplier
SCREAMING_SNAKE_CASE_ : Optional[int] = depth_divisible_by
SCREAMING_SNAKE_CASE_ : List[Any] = min_depth
SCREAMING_SNAKE_CASE_ : Optional[int] = expand_ratio
SCREAMING_SNAKE_CASE_ : int = output_stride
SCREAMING_SNAKE_CASE_ : Optional[int] = first_layer_is_expansion
SCREAMING_SNAKE_CASE_ : List[str] = finegrained_output
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[Any] = tf_padding
SCREAMING_SNAKE_CASE_ : Tuple = classifier_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = initializer_range
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Union[str, Any] = semantic_loss_ignore_index
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = version.parse("1.11" )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
return OrderedDict([('''pixel_values''', {0: '''batch'''})])
@property
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})])
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})])
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
return 1e-4
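# Minimal usage sketch, assuming the standard MobileNetV2Config API that this
# file defines (the class names above are obfuscated):
# from transformers import MobileNetV2Config
# config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
# config.output_stride                                 # 32 by default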
| 318
|
"""simple docstring"""
from __future__ import annotations
import queue
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = data
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : Dict = None
def _A () -> TreeNode:
"""simple docstring"""
print('''\n********Press N to stop entering at any point in time********\n''' )
SCREAMING_SNAKE_CASE_ : List[Any] = input('''Enter the value of the root node: ''' ).strip().lower()
SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TreeNode(int(__a ) )
q.put(__a )
while not q.empty():
SCREAMING_SNAKE_CASE_ : Optional[int] = q.get()
SCREAMING_SNAKE_CASE_ : List[str] = f'Enter the left node of {node_found.data}: '
SCREAMING_SNAKE_CASE_ : Optional[int] = input(__a ).strip().lower() or '''n'''
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE_ : List[str] = TreeNode(int(__a ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = left_node
q.put(__a )
SCREAMING_SNAKE_CASE_ : str = f'Enter the right node of {node_found.data}: '
SCREAMING_SNAKE_CASE_ : str = input(__a ).strip().lower() or '''n'''
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE_ : Any = TreeNode(int(__a ) )
SCREAMING_SNAKE_CASE_ : int = right_node
q.put(__a )
raise RuntimeError('''build_tree exited the input loop without returning a tree''')
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
print(node.data , end=''',''' )
pre_order(node.left )
pre_order(node.right )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
in_order(node.left )
print(node.data , end=''',''' )
in_order(node.right )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=''',''' )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue()
q.put(__a )
while not q.empty():
SCREAMING_SNAKE_CASE_ : Tuple = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue()
q.put(__a )
while not q.empty():
SCREAMING_SNAKE_CASE_ : str = []
while not q.empty():
SCREAMING_SNAKE_CASE_ : List[str] = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__a )
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ : list[TreeNode] = []
SCREAMING_SNAKE_CASE_ : Union[str, Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=''',''' )
stack.append(__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = n.left
# end of while means current node doesn't have left child
SCREAMING_SNAKE_CASE_ : Tuple = stack.pop()
# start to traverse its right child
SCREAMING_SNAKE_CASE_ : str = n.right
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ : list[TreeNode] = []
SCREAMING_SNAKE_CASE_ : Any = node
while n or stack:
while n:
stack.append(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.left
SCREAMING_SNAKE_CASE_ : Any = stack.pop()
print(n.data , end=''',''' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.right
def _A (__a ) -> None:
"""simple docstring"""
if not isinstance(__a , __a ) or not node:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = [], []
SCREAMING_SNAKE_CASE_ : List[Any] = node
stacka.append(__a )
while stacka: # to find the reversed order of post order, store it in stack2
SCREAMING_SNAKE_CASE_ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__a )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=''',''' )
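# Worked trace for the two-stack trick above, on the tree 1 -> (left 2, right 3):
# stack1 pops 1, 3, 2 while stack2 fills as [1, 3, 2]; popping stack2 then
# prints 2,3,1 - left, right, root, i.e. post-order.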
def _A (__a = "" , __a=50 , __a="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(width - len(__a ) - 2 , 2 )
return f'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
UpperCAmelCase_ : TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 318
| 1
|
"""simple docstring"""
from __future__ import annotations
import requests
def _A (__a ) -> dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(__a ).json()
def _A (__a = 10 ) -> list[dict]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
SCREAMING_SNAKE_CASE_ : Tuple = requests.get(__a ).json()[:max_stories]
return [get_hackernews_story(__a ) for story_id in story_ids]
def _A (__a = 10 ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = hackernews_top_stories(__a )
return "\n".join('''* [{title}]({url})'''.format(**__a ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
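# Sample of the rendered markdown (title and URL are illustrative):
# * [Some story title](https://example.com/story)
# with one bullet per story, ten by default.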
| 318
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = "ssube/stable-diffusion-x4-upscaler-onnx"
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any]=0):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_))
SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Tuple = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Tuple = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : int = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE_ : Optional[int] = False
return options
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128))
# using the PNDM scheduler by default
SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : int = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128))
SCREAMING_SNAKE_CASE_ : Tuple = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''')
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : int = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : int = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : List[str] = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 318
| 1
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCAmelCase_ : Tuple = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def _A (__a , __a ) -> Optional[int]:
"""simple docstring"""
inspect_dataset(__a , __a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = path + '''.py'''
assert script_name in os.listdir(__a )
assert "__pycache__" not in os.listdir(__a )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def _A (__a , __a ) -> Optional[Any]:
"""simple docstring"""
inspect_metric(__a , __a )
SCREAMING_SNAKE_CASE_ : int = path + '''.py'''
assert script_name in os.listdir(__a )
assert "__pycache__" not in os.listdir(__a )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def _A (__a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = get_dataset_config_info(__a , config_name=__a )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def _A (__a , __a , __a ) -> List[Any]:
"""simple docstring"""
with pytest.raises(__a ):
get_dataset_config_info(__a , config_name=__a )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def _A (__a , __a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_dataset_config_names(__a )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def _A (__a , __a , __a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = get_dataset_infos(__a )
assert list(infos.keys() ) == expected_configs
SCREAMING_SNAKE_CASE_ : List[Any] = expected_configs[0]
assert expected_config in infos
SCREAMING_SNAKE_CASE_ : List[str] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def _A (__a , __a , __a ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = get_dataset_infos(__a )
assert expected_config in infos
SCREAMING_SNAKE_CASE_ : int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def _A (__a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(__a ):
get_dataset_split_names(__a , config_name=__a )
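# Direct-use sketch of the same APIs the tests above exercise:
# from datasets import get_dataset_split_names
# get_dataset_split_names("squad", config_name="plain_text")
# # -> ['train', 'validation'], per the parametrized expectations above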
| 318
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
UpperCAmelCase_ : List[Any] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
UpperCAmelCase_ : Optional[int] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
UpperCAmelCase_ : Tuple = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=False):
'''simple docstring'''
if return_pvalue:
SCREAMING_SNAKE_CASE_ : int = pearsonr(lowercase_ , lowercase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowercase_ , lowercase_)[0])}
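# Cross-check sketch: the same coefficient can be computed with NumPy, using
# the inputs from the docstring examples above:
# import numpy as np
# x = np.array([10, 9, 2.5, 6, 4]); y = np.array([1, 2, 3, 4, 5])
# float(np.corrcoef(x, y)[0, 1])                       # ~ -0.74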
| 318
| 1
|
"""simple docstring"""
from itertools import product
def _A (__a , __a ) -> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = sides_number
SCREAMING_SNAKE_CASE_ : Optional[int] = max_face_number * dice_number
SCREAMING_SNAKE_CASE_ : Any = [0] * (max_total + 1)
SCREAMING_SNAKE_CASE_ : Dict = 1
SCREAMING_SNAKE_CASE_ : List[str] = range(__a , max_face_number + 1 )
for dice_numbers in product(__a , repeat=__a ):
SCREAMING_SNAKE_CASE_ : List[str] = sum(__a )
totals_frequencies[total] += 1
return totals_frequencies
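# Worked example, assuming faces run from 1 to sides_number as in the original
# formulation: total_frequency_distribution(sides_number=2, dice_number=2)
# gives [0, 0, 1, 2, 1] - totals 2, 3 and 4 occur 1, 2 and 1 times.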
def _A () -> float:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = total_frequency_distribution(
sides_number=4 , dice_number=9 )
SCREAMING_SNAKE_CASE_ : Any = total_frequency_distribution(
sides_number=6 , dice_number=6 )
SCREAMING_SNAKE_CASE_ : List[str] = 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = 9
SCREAMING_SNAKE_CASE_ : List[str] = 4 * 9
SCREAMING_SNAKE_CASE_ : Dict = 6
for peter_total in range(__a , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
SCREAMING_SNAKE_CASE_ : List[Any] = (4**9) * (6**6)
SCREAMING_SNAKE_CASE_ : Tuple = peter_wins_count / total_games_number
SCREAMING_SNAKE_CASE_ : int = round(__a , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''')
| 318
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : Dict[str, int] , lowercase_ : List[str] , lowercase_ : int = None , lowercase_ : int = None):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : str = pad_token_id
SCREAMING_SNAKE_CASE_ : Optional[int] = max_length
SCREAMING_SNAKE_CASE_ : Dict = vocab
SCREAMING_SNAKE_CASE_ : Dict = merges
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BytePairTokenizer(lowercase_ , lowercase_ , sequence_length=lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : GPTaTokenizer , *lowercase_ : Optional[Any] , **lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = [''' '''.join(lowercase_) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE_ : str = tokenizer.get_vocab()
return cls(lowercase_ , lowercase_ , *lowercase_ , **lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : Union[str, os.PathLike] , *lowercase_ : List[str] , **lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ , *lowercase_ , **lowercase_)
return cls.from_tokenizer(lowercase_ , *lowercase_ , **lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : List[Any]):
'''simple docstring'''
return cls(**lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any] , lowercase_ : int = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.tf_tokenizer(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.ones_like(lowercase_)
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = pad_model_inputs(
lowercase_ , max_seq_length=lowercase_ , pad_value=self.pad_token_id)
return {"attention_mask": attention_mask, "input_ids": input_ids}
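# Hedged usage sketch, written against the public TFGPT2Tokenizer API that this
# layer mirrors (the method names above are obfuscated):
# import tensorflow as tf
# from transformers import TFGPT2Tokenizer
# tok = TFGPT2Tokenizer.from_pretrained("gpt2")
# out = tok(tf.constant(["hello world"]))
# out["input_ids"], out["attention_mask"]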
| 318
| 1
|
"""simple docstring"""
UpperCAmelCase_ : Tuple = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def _A (__a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [False] * len(__a )
SCREAMING_SNAKE_CASE_ : Dict = [s]
SCREAMING_SNAKE_CASE_ : Tuple = True
while queue:
SCREAMING_SNAKE_CASE_ : Dict = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__a )
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : Dict = u
return visited[t]
def _A (__a , __a , __a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = [-1] * (len(__a ))
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : Tuple = []
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [i[:] for i in graph] # Keep a copy of the original capacities to identify saturated edges later.
while bfs(__a , __a , __a , __a ):
SCREAMING_SNAKE_CASE_ : List[Any] = float('''Inf''' )
SCREAMING_SNAKE_CASE_ : str = sink
while s != source:
# Find the minimum value in select path
SCREAMING_SNAKE_CASE_ : List[Any] = min(__a , graph[parent[s]][s] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = parent[s]
max_flow += path_flow
SCREAMING_SNAKE_CASE_ : int = sink
while v != source:
SCREAMING_SNAKE_CASE_ : str = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
SCREAMING_SNAKE_CASE_ : int = parent[v]
for i in range(len(__a ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
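# Note: for the test_graph above (the classic CLRS max-flow example), the
# maximum flow from node 0 to node 5 is 23, and the edges crossing the minimum
# cut are (1, 3), (4, 3) and (4, 5) (capacities 12 + 7 + 4 = 23). The function
# returns every edge saturated by the final flow, which can be a superset of
# the true cut edges depending on augmenting-path order.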
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 318
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
SCREAMING_SNAKE_CASE_ : Dict = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , lowercase_)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowercase_ : str):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowercase_ : List[Any]):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : str):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
SCREAMING_SNAKE_CASE_ : Dict = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
processor_slow.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
processor_fast.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , lowercase_)
self.assertIsInstance(processor_fast.tokenizer , lowercase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , lowercase_)
self.assertIsInstance(processor_fast.image_processor , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''')
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor(do_normalize=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=lowercase_)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Any = image_processor(lowercase_ , return_tensors='''np''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowercase_ , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : int = processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.batch_decode(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Dict = processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
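# Minimal end-to-end usage sketch mirroring what the tests above assert (not
# part of the test suite). The checkpoint id "OFA-Sys/chinese-clip-vit-base-patch16"
# is an assumption -- any Chinese-CLIP checkpoint with a tokenizer and an image
# processor should behave the same -- and running it needs network access.
if __name__ == "__main__":
    from PIL import Image
    from transformers import ChineseCLIPProcessor
    demo_processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    demo_inputs = demo_processor(
        text="Alexandra,T-shirt的价格是15便士。",
        images=Image.new("RGB", (224, 224)),
        return_tensors="np",
    )
    # Same keys the tests above check for:
    print(sorted(demo_inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']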
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase_ : Optional[int] = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase_ : Dict = {
"""google/rembert""": 256,
}
UpperCAmelCase_ : Optional[Any] = """▁"""
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = RemBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
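# Self-contained sketch of the special-token layout the methods above implement
# (BERT-style pairing; the ids 101/102 are illustrative, not RemBERT's real ids):
def _pair_layout(seq_a, seq_b=None, cls_id=101, sep_id=102):
    # [CLS] A [SEP]            for a single sequence
    # [CLS] A [SEP] B [SEP]    for a pair
    if seq_b is None:
        return [cls_id] + seq_a + [sep_id]
    return [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
assert _pair_layout([5, 6]) == [101, 5, 6, 102]
assert _pair_layout([5, 6], [7]) == [101, 5, 6, 102, 7, 102]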
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "rwkv"
__UpperCamelCase = {"max_position_embeddings": "context_length"}
    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
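# The two derived defaults above, spelled out in plain Python (no transformers
# import needed): attention_hidden_size falls back to hidden_size, and
# intermediate_size to 4 * hidden_size.
_hidden = 1024
_attn = None  # attention_hidden_size not supplied
_inter = None  # intermediate_size not supplied
_attn = _attn if _attn is not None else _hidden
_inter = _inter if _inter is not None else 4 * _hidden
assert (_attn, _inter) == (1024, 4096)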
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """simple docstring"""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
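# Independent O(n^2) brute-force cross-check for solution() above; handy as a
# sanity test for small n. For the classic n = 1000 both return 31875000
# (the triple 200, 375, 425).
def brute_force(n: int = 1000) -> int:
    best = -1
    for a in range(1, n):
        for b in range(a, n - a):
            c = n - a - b
            if a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best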
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J/(mol*K)
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """simple docstring"""
    if temperature < 0:
        raise Exception('''Temperature cannot be less than 0 K''')
    if molar_mass <= 0:
        raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''')
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest
    # run doctest
    doctest.testmod()
    # example
    temperature = 300  # K
    molar_mass = 0.028  # kg/mol for N2 -- the formula expects SI units, not 28 g/mol
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
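# Worked numeric check of v_rms = sqrt(3RT/M) for the example above:
# 3 * 8.3144598 * 300 / 0.028 ≈ 267250.5, and sqrt(267250.5) ≈ 516.96 m/s,
# the right order of magnitude for a light gas at room temperature.
import math
assert math.isclose(rms_speed_of_molecule(300, 0.028), 516.96, rel_tol=1e-4)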
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1):
        '''simple docstring'''
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        '''simple docstring'''
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.conditioning_emb(lowercase_).unsqueeze(1)
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
SCREAMING_SNAKE_CASE_ : Optional[int] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
SCREAMING_SNAKE_CASE_ : int = torch.broadcast_to(
torch.arange(lowercase_ , device=decoder_input_tokens.device) , (batch, seq_length) , )
SCREAMING_SNAKE_CASE_ : Any = self.position_encoding(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.continuous_inputs_projection(lowercase_)
inputs += position_encodings
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dropout(lowercase_)
# decoder: No padding present.
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype)
# Translate encoding masks to encoder-decoder masks.
SCREAMING_SNAKE_CASE_ : List[str] = [(x, self.encoder_decoder_mask(lowercase_ , lowercase_)) for x, y in encodings_and_masks]
# cross attend style: concat encodings
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1)
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1)
for lyr in self.decoders:
SCREAMING_SNAKE_CASE_ : int = lyr(
lowercase_ , conditioning_emb=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.decoder_norm(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.post_dropout(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.spec_out(lowercase_)
return spec_out
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : Any = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , dropout_rate=lowercase_))
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , dropout_rate=lowercase_ , layer_norm_epsilon=lowercase_ , ))
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ , layer_norm_epsilon=lowercase_))
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self.layer[0](
lowercase_ , conditioning_emb=lowercase_ , attention_mask=lowercase_ , )
if encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE_ : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1e10).to(
encoder_hidden_states.dtype)
SCREAMING_SNAKE_CASE_ : str = self.layer[1](
lowercase_ , key_value_states=lowercase_ , attention_mask=lowercase_ , )
# Apply Film Conditional Feed Forward layer
SCREAMING_SNAKE_CASE_ : str = self.layer[-1](lowercase_ , lowercase_)
return (hidden_states,)
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : int = TaLayerNorm(lowercase_)
SCREAMING_SNAKE_CASE_ : int = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = Attention(query_dim=lowercase_ , heads=lowercase_ , dim_head=lowercase_ , out_bias=lowercase_ , scale_qk=lowercase_)
SCREAMING_SNAKE_CASE_ : str = nn.Dropout(lowercase_)
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.layer_norm(lowercase_)
if conditioning_emb is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = self.FiLMLayer(lowercase_ , lowercase_)
# Self-attention block
SCREAMING_SNAKE_CASE_ : int = self.attention(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_states + self.dropout(lowercase_)
return hidden_states
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : Any = Attention(query_dim=lowercase_ , heads=lowercase_ , dim_head=lowercase_ , out_bias=lowercase_ , scale_qk=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TaLayerNorm(lowercase_ , eps=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.Dropout(lowercase_)
    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.layer_norm(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.attention(
lowercase_ , encoder_hidden_states=lowercase_ , attention_mask=attention_mask.squeeze(1) , )
SCREAMING_SNAKE_CASE_ : Any = hidden_states + self.dropout(lowercase_)
return layer_output
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : Any = TaDenseGatedActDense(d_model=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TaLayerNorm(lowercase_ , eps=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = nn.Dropout(lowercase_)
    def forward(self, hidden_states, conditioning_emb=None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.layer_norm(lowercase_)
if conditioning_emb is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.film(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.DenseReluDense(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = hidden_states + self.dropout(lowercase_)
return hidden_states
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, d_model, d_ff, dropout_rate):
        '''simple docstring'''
        super().__init__()
        # gated activation needs two distinct input projections (T5's wi_0 / wi_1)
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()
    def forward(self, hidden_states):
        '''simple docstring'''
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, hidden_size, eps=1e-6):
        '''simple docstring'''
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
    def forward(self, hidden_states):
        '''simple docstring'''
        # T5-style RMS norm: compute the variance in float32, scale only (no shift)
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
    def forward(self, input: torch.Tensor):
        '''simple docstring'''
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
    def __init__(self, in_features, out_features):
        '''simple docstring'''
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)
    def forward(self, x, conditioning_emb):
        '''simple docstring'''
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
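# Standalone shape check of the FiLM modulation the last class implements
# (random weights; illustrative only): a conditioning embedding is projected to
# (scale, shift) by a bias-free linear layer and broadcast over the sequence
# dimension as x * (1 + scale) + shift.
_film_proj = nn.Linear(32, 8 * 2, bias=False)
_x = torch.randn(2, 5, 8)
_cond = torch.randn(2, 1, 32)
_scale, _shift = torch.chunk(_film_proj(_cond), 2, dim=-1)
assert (_x * (1 + _scale) + _shift).shape == _x.shape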
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies
def __iter__( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''')
for task in range(self.n_tasks):
for _ in range(self.n_copies):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
    def __init__(self, start_length, eof_strings, tokenizer):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = start_length
SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
    def __call__(self, input_ids, scores, **kwargs):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
SCREAMING_SNAKE_CASE_ : Tuple = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
return all(lowercase_)
def remove_last_block(string):
    """simple docstring"""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
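# Illustrative check (not part of the original script): the capturing group in
# re.split keeps the delimiters, so dropping the last two pieces strips the
# final stop string plus everything generated after it.
assert remove_last_block("    return x\nprint(x)\nclass Foo:") == "    return x\nprint(x)"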
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__a ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1]
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes(
__a , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__a , __a ):
gen_token_dict[task].append(__a )
SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
code_gens[task].append(remove_last_block(__a ) )
return code_gens
def main():
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a )
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE_ : str = '''false'''
if args.num_workers is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
set_seed(args.seed , device_specific=__a )
# Load model and tokenizer
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE_ : List[str] = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' )
SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' )
SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a )
SCREAMING_SNAKE_CASE_ : List[Any] = complete_code(
__a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE_ : int = []
for task in tqdm(range(__a ) ):
SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test''']
SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})'
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute(
references=__a , predictions=__a , num_workers=args.num_workers )
print(f'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__a , __a )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase_ : str = get_tests_dir("""fixtures""")
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = mock.Mock()
SCREAMING_SNAKE_CASE_ : List[str] = 500
SCREAMING_SNAKE_CASE_ : str = {}
SCREAMING_SNAKE_CASE_ : Any = HTTPError
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''')
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowercase_) as mock_head:
SCREAMING_SNAKE_CASE_ : int = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''')
        # This checks that we did call the fake head request
mock_head.assert_called()
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''')
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = TOKEN
HfFolder.save_token(lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''')
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(lowercase_)
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token)
SCREAMING_SNAKE_CASE_ : Dict = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor')
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowercase_ , repo_id='''test-feature-extractor''' , push_to_hub=lowercase_ , use_auth_token=self._token)
SCREAMING_SNAKE_CASE_ : Any = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor')
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(lowercase_)
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token)
SCREAMING_SNAKE_CASE_ : int = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''')
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowercase_ , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=lowercase_ , use_auth_token=self._token)
SCREAMING_SNAKE_CASE_ : Tuple = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''')
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
SCREAMING_SNAKE_CASE_ : List[str] = CustomFeatureExtractor.from_pretrained(lowercase_)
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
SCREAMING_SNAKE_CASE_ : Tuple = AutoFeatureExtractor.from_pretrained(
F'{USER}/test-dynamic-feature-extractor' , trust_remote_code=lowercase_)
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''')
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["image_processor", "feature_extractor"]
__UpperCamelCase = "TvltImageProcessor"
__UpperCamelCase = "TvltFeatureExtractor"
    def __init__(self, image_processor, feature_extractor):
        '''simple docstring'''
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
'''simple docstring'''
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''')
SCREAMING_SNAKE_CASE_ : Any = None
if images is not None:
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor(lowercase_ , mask_pixel=lowercase_ , *lowercase_ , **lowercase_)
if images_mixed is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor(lowercase_ , is_mixed=lowercase_ , *lowercase_ , **lowercase_)
if audio is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(
lowercase_ , *lowercase_ , sampling_rate=lowercase_ , mask_audio=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = {}
if audio is not None:
output_dict.update(lowercase_)
if images is not None:
output_dict.update(lowercase_)
if images_mixed_dict is not None:
output_dict.update(lowercase_)
return output_dict
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor.model_input_names
SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
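# Usage sketch (hedged: "ZinengTang/tvlt-base" is assumed to be a valid hub
# checkpoint; `video_frames` and `waveform` are placeholders). As __call__
# above shows, the processor simply merges the image-processor and
# feature-extractor outputs into one dict, so both modalities can go together:
#
#   from transformers import TvltProcessor
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#   # keys: union of the image processor's and feature extractor's outputs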
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained('''google/mt5-small''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer('''Hello there''' , return_tensors='''tf''').input_ids
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer('''Hi I am''' , return_tensors='''tf''').input_ids
SCREAMING_SNAKE_CASE_ : str = model(lowercase_ , labels=lowercase_).loss
SCREAMING_SNAKE_CASE_ : Any = -tf.math.reduce_mean(lowercase_).numpy()
SCREAMING_SNAKE_CASE_ : int = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "SpeechT5FeatureExtractor"
__UpperCamelCase = "SpeechT5Tokenizer"
    def __init__(self, feature_extractor, tokenizer):
        '''simple docstring'''
        super().__init__(feature_extractor, tokenizer)
def __call__( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''audio''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('''text''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('''text_target''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''audio_target''' , lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''sampling_rate''' , lowercase_)
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')
if audio is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_)
elif text is not None:
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(lowercase_ , **lowercase_)
else:
SCREAMING_SNAKE_CASE_ : Any = None
if audio_target is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor(audio_target=lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = targets['''input_values''']
elif text_target is not None:
SCREAMING_SNAKE_CASE_ : int = self.tokenizer(lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = targets['''input_ids''']
else:
SCREAMING_SNAKE_CASE_ : int = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE_ : Any = decoder_attention_mask
return inputs
def _SCREAMING_SNAKE_CASE ( self : Tuple , *lowercase_ : Tuple , **lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''input_values''' , lowercase_)
SCREAMING_SNAKE_CASE_ : int = kwargs.pop('''input_ids''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''labels''' , lowercase_)
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''')
if input_values is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_)
elif input_ids is not None:
SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer.pad(lowercase_ , **lowercase_)
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
if labels is not None:
if "input_ids" in labels or (isinstance(lowercase_ , lowercase_) and "input_ids" in labels[0]):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer.pad(lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = targets['''input_ids''']
else:
SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.feature_size
SCREAMING_SNAKE_CASE_ : Optional[int] = self.feature_extractor.num_mel_bins
SCREAMING_SNAKE_CASE_ : str = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : str = feature_size_hack
SCREAMING_SNAKE_CASE_ : Dict = targets['''input_values''']
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE_ : Dict = labels
SCREAMING_SNAKE_CASE_ : List[str] = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = decoder_attention_mask
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : Tuple):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowercase_ : Dict , **lowercase_ : List[Any]):
'''simple docstring'''
return self.tokenizer.decode(*lowercase_ , **lowercase_)
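# Routing summary for __call__ above (derived directly from the branches):
#   processor(audio=waveform, sampling_rate=sr)  -> feature-extractor inputs
#   processor(text="...")                        -> tokenizer inputs
#   audio_target=... / text_target=...           -> processed targets end up
#       under `labels`, and the targets' attention_mask (if present) is
#       surfaced as `decoder_attention_mask` on the returned inputs.
# Mixing `audio` with `text`, or `audio_target` with `text_target`, raises
# ValueError, as does calling the processor with no inputs at all.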
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key(k):
    """simple docstring"""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """simple docstring"""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
    # make sure embedding.padding_idx is respected
    mapping['''shared.weight'''][cfg.pad_token_id] = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1])
    mapping['''encoder.embed_tokens.weight'''] = mapping['''shared.weight''']
    mapping['''decoder.embed_tokens.weight'''] = mapping['''shared.weight''']
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith('''bias''') and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """simple docstring"""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['''Adafactor''', '''global_step''']
    for name, shape in tqdm(init_vars, desc='''converting tf checkpoint to dict'''):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    """simple docstring"""
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f'summarization_{dataset}']['''max_position_embeddings''']
    tok = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''', model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f'summarization_{dataset}']
    if dataset == "large":
        cfg_updates['''task_specific_params'''] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop('''model.decoder.embed_positions.weight''')
    sd.pop('''model.encoder.embed_positions.weight''')
    torch.save(sd, Path(save_dir) / '''pytorch_model.bin''')
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
UpperCAmelCase_ : Dict = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase_ : List[Any] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase_ : Optional[int] = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
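# Example invocation (script filename assumed; the two arguments are the
# positionals declared above):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc
#
# If save_dir resolves to None, the script derives it as pegasus/<dataset>
# from the parent directory name of the TF checkpoint path.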
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features) -> Optional[int]:
    """simple docstring"""
    batch_size = np.inf
    def set_batch_size(feature) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
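# Illustrative behaviour of get_writer_batch_size (shown as comments, since the
# row-group constants live in datasets.config and may change across versions):
#
#   get_writer_batch_size(Features({"label": Value("int64")}))  -> None
#       (no special column: fall back to the default batch size downstream)
#   get_writer_batch_size(Features({"img": Image()}))
#       -> config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS
#   get_writer_batch_size(Features({"img": Image(), "audio": Audio()}))
#       -> the smaller of the image and audio row-group sizes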
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        '''simple docstring'''
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs, )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE_ : str = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : Dict = None
self.builder.download_and_prepare(
download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE_ : Any = self.builder.as_dataset(
split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory)
return dataset
class lowerCAmelCase__ :
'''simple docstring'''
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        '''simple docstring'''
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
with open(self.path_or_buf , '''wb+''') as buffer:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._write(file_obj=lowercase_ , batch_size=lowercase_ , **self.parquet_writer_kwargs)
else:
SCREAMING_SNAKE_CASE_ : str = self._write(file_obj=self.path_or_buf , batch_size=lowercase_ , **self.parquet_writer_kwargs)
return written
    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = parquet_writer_kwargs.pop('''path_or_buf''' , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.dataset.features.arrow_schema
SCREAMING_SNAKE_CASE_ : Tuple = pq.ParquetWriter(lowercase_ , schema=lowercase_ , **lowercase_)
for offset in logging.tqdm(
range(0 , len(self.dataset) , lowercase_) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
SCREAMING_SNAKE_CASE_ : List[Any] = query_table(
table=self.dataset._data , key=slice(lowercase_ , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(lowercase_)
written += batch.nbytes
writer.close()
return written
|
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
UpperCAmelCase_ : int = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
"""simple docstring"""
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(__a , __a )
if weight_type is not None:
SCREAMING_SNAKE_CASE_ : Tuple = getattr(__a , __a ).shape
else:
SCREAMING_SNAKE_CASE_ : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE_ : Any = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_ : Any = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_ : Optional[Any] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_ : Dict = value
else:
SCREAMING_SNAKE_CASE_ : Dict = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model, hf_model):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : Optional[int] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE_ : Tuple = hf_model.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == '''group''' , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
SCREAMING_SNAKE_CASE_ : int = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_ : Tuple = name.split(__a )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE_ : Optional[Any] = mapped_key.replace('''*''' , __a )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_ : List[Any] = '''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_ : Tuple = '''weight_v'''
elif "bias" in name and "relative_attention_bias" not in name:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE_ : Optional[int] = '''weight'''
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = None
set_recursively(__a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(f'Unused weights: {unused_weights}' )
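# Illustration of the "*" substitution performed above: the layer index is
# recovered from the fairseq parameter name and spliced into the HF key
# template from MAPPING.
_name = "encoder.layers.3.self_attn.k_proj.weight"
_key = "self_attn.k_proj"
assert _name.split(_key)[0].split(".")[-2] == "3"
assert MAPPING[_key].replace("*", "3") == "encoder.layers.3.attention.k_proj"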
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = name.split('''.''' )
SCREAMING_SNAKE_CASE_ : Dict = int(items[0] )
SCREAMING_SNAKE_CASE_ : int = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
SCREAMING_SNAKE_CASE_ : Any = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
SCREAMING_SNAKE_CASE_ : Any = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
SCREAMING_SNAKE_CASE_ : Tuple = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__a )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint['''cfg'''])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint['''model'''])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
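# Hedged illustration of the technique above, not part of the conversion script:
# recursively_load_weights matches fairseq parameter names against a MAPPING table
# and copies each tensor into the HF model under the mapped path, collecting
# anything unmatched as "unused weights". The mapping entry, module, and shapes
# below are hypothetical toy stand-ins, not the real WavLM table.
import torch

MAPPING_SKETCH = {"w2v_model.proj": "projection"}  # source prefix -> target module

def copy_by_mapping(src_state: dict, dst_model: torch.nn.Module) -> list:
    unused = []
    dst_state = dst_model.state_dict()
    for name, value in src_state.items():
        for key, mapped in MAPPING_SKETCH.items():
            if key in name:
                # keep the trailing "weight"/"bias" suffix from the source name
                dst_state[f"{mapped}.{name.rsplit('.', 1)[-1]}"].copy_(value)
                break
        else:
            unused.append(name)  # no mapping entry matched this parameter
    return unused

class _TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.projection = torch.nn.Linear(2, 2)

_model = _TinyModel()
_leftover = copy_by_mapping(
    {"w2v_model.proj.weight": torch.zeros(2, 2), "quantizer.codebook": torch.zeros(1)},
    _model,
)
assert _leftover == ["quantizer.codebook"]
assert torch.equal(_model.projection.weight, torch.zeros(2, 2))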
| 318
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = ["""model.decoder.embed_positions.weights"""]
def _A (__a ) -> Dict:
"""simple docstring"""
if "emb" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
SCREAMING_SNAKE_CASE_ : Any = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
SCREAMING_SNAKE_CASE_ : Dict = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def _A (__a , __a ) -> Tuple[Dict, Dict]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(state_dict.keys() )
SCREAMING_SNAKE_CASE_ : int = {}
for key in keys:
SCREAMING_SNAKE_CASE_ : int = state_dict.pop(__a )
SCREAMING_SNAKE_CASE_ : int = rename_keys(__a )
if "in_proj_weight" in key:
# split fused qkv proj
SCREAMING_SNAKE_CASE_ : List[str] = val[:hidden_size, :]
SCREAMING_SNAKE_CASE_ : List[str] = val[hidden_size : 2 * hidden_size, :]
SCREAMING_SNAKE_CASE_ : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
SCREAMING_SNAKE_CASE_ : int = val
else:
SCREAMING_SNAKE_CASE_ : Any = val
return state_dict, enc_dec_proj_state_dict
def _A (__a ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
SCREAMING_SNAKE_CASE_ : Optional[int] = 10_24
SCREAMING_SNAKE_CASE_ : Tuple = 24
SCREAMING_SNAKE_CASE_ : Optional[Any] = 16
elif checkpoint == "medium":
SCREAMING_SNAKE_CASE_ : List[str] = 15_36
SCREAMING_SNAKE_CASE_ : Optional[int] = 48
SCREAMING_SNAKE_CASE_ : Optional[int] = 24
elif checkpoint == "large":
SCREAMING_SNAKE_CASE_ : Optional[Any] = 20_48
SCREAMING_SNAKE_CASE_ : Optional[int] = 48
SCREAMING_SNAKE_CASE_ : int = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
SCREAMING_SNAKE_CASE_ : List[Any] = MusicgenDecoderConfig(
hidden_size=__a , ffn_dim=hidden_size * 4 , num_hidden_layers=__a , num_attention_heads=__a , )
return config
@torch.no_grad()
def _A (__a , __a=None , __a=None , __a="cpu" ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = MusicGen.get_pretrained(__a , device=__a )
SCREAMING_SNAKE_CASE_ : Dict = decoder_config_from_checkpoint(__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = fairseq_model.lm.state_dict()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = rename_state_dict(
__a , hidden_size=decoder_config.hidden_size )
    SCREAMING_SNAKE_CASE_ : Optional[Any] = T5EncoderModel.from_pretrained('''t5-base''' )
SCREAMING_SNAKE_CASE_ : List[str] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
SCREAMING_SNAKE_CASE_ : int = MusicgenForCausalLM(__a ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = decoder.load_state_dict(__a , strict=__a )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__a )
if len(__a ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(__a ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
SCREAMING_SNAKE_CASE_ : str = MusicgenForConditionalGeneration(text_encoder=__a , audio_encoder=__a , decoder=__a )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__a )
# check we can do a forward pass
SCREAMING_SNAKE_CASE_ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(input_ids=__a , decoder_input_ids=__a ).logits
if logits.shape != (8, 1, 20_48):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained('''t5-base''' )
SCREAMING_SNAKE_CASE_ : str = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
SCREAMING_SNAKE_CASE_ : Tuple = MusicgenProcessor(feature_extractor=__a , tokenizer=__a )
# set the appropriate bos/pad token ids
SCREAMING_SNAKE_CASE_ : str = 20_48
SCREAMING_SNAKE_CASE_ : List[Any] = 20_48
# set other default generation config params
SCREAMING_SNAKE_CASE_ : int = int(30 * audio_encoder.config.frame_rate )
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3.0
if pytorch_dump_folder is not None:
Path(__a ).mkdir(exist_ok=__a )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(__a )
processor.save_pretrained(__a )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(__a )
processor.push_to_hub(__a )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
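# Hedged sketch of the fused-projection split performed in rename_state_dict above:
# fairseq stores the query/key/value projections stacked as one "in_proj_weight"
# of shape (3 * hidden_size, hidden_size), and the conversion slices it into the
# three separate HF projection weights. hidden_size below is an illustrative value.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

q_proj = in_proj_weight[:hidden_size, :]
k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_proj = in_proj_weight[-hidden_size:, :]

# Stacking the slices back recovers the original fused matrix.
assert torch.equal(torch.cat([q_proj, k_proj, v_proj], dim=0), in_proj_weight)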
| 318
| 1
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
UpperCAmelCase_ : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__UpperCamelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCamelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__UpperCamelCase = field(default=UpperCAmelCase__ , metadata={"help": "The input training data file (a text file)."} )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCamelCase = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
if self.train_file is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.train_file.split('''.''')[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.validation_file.split('''.''')[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = None
def __call__( self : Tuple , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = '''label''' if '''label''' in features[0].keys() else '''labels'''
SCREAMING_SNAKE_CASE_ : Optional[int] = [feature.pop(lowercase_) for feature in features]
SCREAMING_SNAKE_CASE_ : List[str] = len(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = len(features[0]['''input_ids'''])
SCREAMING_SNAKE_CASE_ : List[Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(lowercase_)] for feature in features
]
SCREAMING_SNAKE_CASE_ : Any = list(chain(*lowercase_))
SCREAMING_SNAKE_CASE_ : str = self.tokenizer.pad(
lowercase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
SCREAMING_SNAKE_CASE_ : str = {k: v.view(lowercase_ , lowercase_ , -1) for k, v in batch.items()}
# Add back labels
        SCREAMING_SNAKE_CASE_ : str = torch.tensor(lowercase_ , dtype=torch.int64)
return batch
def _A () -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , __a , __a )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ : Tuple = training_args.get_process_log_level()
logger.setLevel(__a )
datasets.utils.logging.set_verbosity(__a )
transformers.utils.logging.set_verbosity(__a )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE_ : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
if data_args.train_file is not None:
SCREAMING_SNAKE_CASE_ : Tuple = data_args.train_file
if data_args.validation_file is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = data_args.validation_file
SCREAMING_SNAKE_CASE_ : Any = data_args.train_file.split('''.''' )[-1]
SCREAMING_SNAKE_CASE_ : List[Any] = load_dataset(
__a , data_files=__a , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
SCREAMING_SNAKE_CASE_ : List[str] = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
SCREAMING_SNAKE_CASE_ : List[Any] = [f'ending{i}' for i in range(4 )]
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''sent1'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''sent2'''
if data_args.max_seq_length is None:
SCREAMING_SNAKE_CASE_ : Any = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
SCREAMING_SNAKE_CASE_ : Dict = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
SCREAMING_SNAKE_CASE_ : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__a ):
SCREAMING_SNAKE_CASE_ : int = [[context] * 4 for context in examples[context_name]]
SCREAMING_SNAKE_CASE_ : int = examples[question_header_name]
SCREAMING_SNAKE_CASE_ : str = [
[f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(__a )
]
# Flatten out
SCREAMING_SNAKE_CASE_ : Dict = list(chain(*__a ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(chain(*__a ) )
# Tokenize
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(
__a , __a , truncation=__a , max_length=__a , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__a ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
SCREAMING_SNAKE_CASE_ : Tuple = raw_datasets['''train''']
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE_ : int = min(len(__a ) , data_args.max_train_samples )
SCREAMING_SNAKE_CASE_ : str = train_dataset.select(range(__a ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE_ : Any = train_dataset.map(
__a , batched=__a , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
SCREAMING_SNAKE_CASE_ : Any = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE_ : Any = min(len(__a ) , data_args.max_eval_samples )
SCREAMING_SNAKE_CASE_ : Optional[int] = eval_dataset.select(range(__a ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE_ : int = eval_dataset.map(
__a , batched=__a , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
SCREAMING_SNAKE_CASE_ : str = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=__a , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(__a ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = eval_predictions
SCREAMING_SNAKE_CASE_ : str = np.argmax(__a , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ : Optional[Any] = Trainer(
model=__a , args=__a , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__a , data_collator=__a , compute_metrics=__a , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE_ : List[str] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE_ : int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE_ : Dict = last_checkpoint
SCREAMING_SNAKE_CASE_ : Tuple = trainer.train(resume_from_checkpoint=__a )
trainer.save_model() # Saves the tokenizer too for easy upload
SCREAMING_SNAKE_CASE_ : List[Any] = train_result.metrics
SCREAMING_SNAKE_CASE_ : Optional[int] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__a )
)
SCREAMING_SNAKE_CASE_ : Tuple = min(__a , len(__a ) )
trainer.log_metrics('''train''' , __a )
trainer.save_metrics('''train''' , __a )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = trainer.evaluate()
SCREAMING_SNAKE_CASE_ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__a )
SCREAMING_SNAKE_CASE_ : Optional[int] = min(__a , len(__a ) )
trainer.log_metrics('''eval''' , __a )
trainer.save_metrics('''eval''' , __a )
SCREAMING_SNAKE_CASE_ : Tuple = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__a )
else:
trainer.create_model_card(**__a )
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
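# Hedged sketch of the flatten/pad/un-flatten pattern used by the multiple-choice
# data collator above: each example carries num_choices candidate sequences, which
# are flattened to (batch * num_choices) rows so tokenizer.pad sees ordinary
# sequences, then reshaped back to (batch, num_choices, seq_len). Toy tensors only.
import torch

batch_size, num_choices, seq_len = 2, 4, 5
flat = torch.arange(batch_size * num_choices * seq_len).reshape(batch_size * num_choices, seq_len)

# Un-flatten back to one row per example, one sub-row per answer candidate.
unflat = flat.view(batch_size, num_choices, -1)
assert unflat.shape == (batch_size, num_choices, seq_len)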
| 318
|
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def _A (__a ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def _A (__a ) -> np.ndarray:
"""simple docstring"""
return (gray > 1_27) & (gray <= 2_55)
def _A (__a , __a ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros_like(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
SCREAMING_SNAKE_CASE_ : Any = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
UpperCAmelCase_ : Dict = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
UpperCAmelCase_ : List[Any] = np.array(Image.open(lena_path))
# kernel to be applied
UpperCAmelCase_ : Any = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
UpperCAmelCase_ : Tuple = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
UpperCAmelCase_ : List[str] = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
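# Hedged worked example for the dilation above: with the same cross-shaped
# structuring element, a single centered foreground pixel grows into its
# 4-neighbourhood. Expected output shown as a comment; toy input only.
import numpy as np

tiny_image = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
# dilation(tiny_image, cross) is expected to return:
# [[0, 1, 0],
#  [1, 1, 1],
#  [0, 1, 0]]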
| 318
| 1
|
"""simple docstring"""
import math
import qiskit
def _A (__a = 1 , __a = 1 , __a = 1 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if (
isinstance(__a , __a )
or isinstance(__a , __a )
or isinstance(__a , __a )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(__a ) != input_a)
or (math.floor(__a ) != input_a)
or (math.floor(__a ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less than or equal to 2.''' )
# build registers
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.QuantumRegister(4 , '''qr''' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
SCREAMING_SNAKE_CASE_ : List[Any] = [input_a, input_a, carry_in]
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.QuantumCircuit(__a , __a )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__a ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__a ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__a ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __a ) # measure the last two qbits
SCREAMING_SNAKE_CASE_ : int = qiskit.Aer.get_backend('''aer_simulator''' )
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.execute(__a , __a , shots=10_00 )
return job.result().get_counts(__a )
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
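# Hedged classical cross-check for the quantum full adder above: for basis-state
# inputs (0 or 1), the measured two bits should agree with the ordinary full-adder
# truth table, one bit for the sum and one for the carry-out.
def classical_full_adder(a: int, b: int, carry_in: int) -> tuple:
    total = a + b + carry_in
    return total % 2, total // 2  # (sum bit, carry-out bit)

assert classical_full_adder(1, 1, 1) == (1, 1)  # 1 + 1 + 1 = 0b11
assert classical_full_adder(1, 0, 0) == (1, 0)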
| 318
|
"""simple docstring"""
from collections import defaultdict
def _A (__a , __a ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = first_str.lower().strip()
SCREAMING_SNAKE_CASE_ : List[Any] = second_str.lower().strip()
# Remove whitespace
SCREAMING_SNAKE_CASE_ : Dict = first_str.replace(''' ''' , '''''' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(__a ) != len(__a ):
return False
# Default values for count should be 0
SCREAMING_SNAKE_CASE_ : defaultdict[str, int] = defaultdict(__a )
    # For each character in the two strings, increment the count for the first
    # string and decrement it for the second; anagrams cancel out to all zeros.
for i in range(len(__a ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ : Any = input("""Enter the first string """).strip()
UpperCAmelCase_ : Optional[int] = input("""Enter the second string """).strip()
UpperCAmelCase_ : Union[str, Any] = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
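# Hedged cross-check for the anagram test above: collections.Counter gives the
# same multiset comparison as the defaultdict bookkeeping.
from collections import Counter

def check_anagrams_counter(first: str, second: str) -> bool:
    first = first.lower().replace(" ", "")
    second = second.lower().replace(" ", "")
    return Counter(first) == Counter(second)

assert check_anagrams_counter("Silent", "Listen")
assert not check_anagrams_counter("apple", "pale")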
| 318
| 1
|
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def _A (__a ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def _A (__a ) -> np.ndarray:
"""simple docstring"""
return (gray > 1_27) & (gray <= 2_55)
def _A (__a , __a ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros_like(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
SCREAMING_SNAKE_CASE_ : Any = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
UpperCAmelCase_ : Dict = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
UpperCAmelCase_ : List[Any] = np.array(Image.open(lena_path))
# kernel to be applied
UpperCAmelCase_ : Any = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
UpperCAmelCase_ : Tuple = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
UpperCAmelCase_ : List[str] = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
| 318
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase_ : Optional[Any] = """docs/source/en/_toctree.yml"""
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
SCREAMING_SNAKE_CASE_ : List[Any] = [key for key, value in counts.items() if value > 1]
SCREAMING_SNAKE_CASE_ : int = []
for duplicate_key in duplicates:
SCREAMING_SNAKE_CASE_ : List[str] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
    return sorted(__a , key=lambda s: s["title"].lower() )
def _A (__a=False ) -> Tuple:
"""simple docstring"""
with open(__a , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE_ : Dict = yaml.safe_load(f.read() )
# Get to the API doc
SCREAMING_SNAKE_CASE_ : Any = 0
while content[api_idx]["title"] != "API":
api_idx += 1
SCREAMING_SNAKE_CASE_ : str = content[api_idx]['''sections''']
# Then to the model doc
SCREAMING_SNAKE_CASE_ : List[Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
SCREAMING_SNAKE_CASE_ : Optional[int] = api_doc[model_idx]['''sections''']
SCREAMING_SNAKE_CASE_ : str = [(idx, section) for idx, section in enumerate(__a ) if '''sections''' in section]
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
for idx, modality_doc in modalities_docs:
SCREAMING_SNAKE_CASE_ : List[str] = modality_doc['''sections''']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
SCREAMING_SNAKE_CASE_ : str = True
if overwrite:
SCREAMING_SNAKE_CASE_ : Optional[int] = new_modality_doc
if diff:
if overwrite:
SCREAMING_SNAKE_CASE_ : List[Any] = model_doc
SCREAMING_SNAKE_CASE_ : int = api_doc
with open(__a , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase_ : Tuple = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
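# Hedged sketch of the clean-up rule in clean_model_doc_toc above: entries sharing
# a "local" key must agree on their title, duplicates collapse to a single entry,
# and the result is sorted case-insensitively by title. Toy entries only.
docs = [
    {"local": "bert", "title": "BERT"},
    {"local": "albert", "title": "ALBERT"},
    {"local": "bert", "title": "BERT"},
]
titles_by_local = {}
for doc in docs:
    if titles_by_local.get(doc["local"], doc["title"]) != doc["title"]:
        raise ValueError(f"conflicting titles for {doc['local']}")
    titles_by_local[doc["local"]] = doc["title"]
deduped = sorted(
    ({"local": loc, "title": title} for loc, title in titles_by_local.items()),
    key=lambda s: s["title"].lower(),
)
assert [d["local"] for d in deduped] == ["albert", "bert"]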
| 318
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "xmod"
def __init__( self : Tuple , lowercase_ : Any=30522 , lowercase_ : List[str]=768 , lowercase_ : List[Any]=12 , lowercase_ : int=12 , lowercase_ : Dict=3072 , lowercase_ : Dict="gelu" , lowercase_ : Dict=0.1 , lowercase_ : Any=0.1 , lowercase_ : Dict=512 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[str]=0.02 , lowercase_ : Dict=1e-12 , lowercase_ : Dict=1 , lowercase_ : Optional[int]=0 , lowercase_ : Union[str, Any]=2 , lowercase_ : str="absolute" , lowercase_ : List[str]=True , lowercase_ : Tuple=None , lowercase_ : List[str]=False , lowercase_ : Union[str, Any]=2 , lowercase_ : Any=False , lowercase_ : Dict=True , lowercase_ : Optional[int]=True , lowercase_ : Optional[Any]=("en_XX",) , lowercase_ : Optional[int]=None , **lowercase_ : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Any = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : Any = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE_ : Dict = classifier_dropout
SCREAMING_SNAKE_CASE_ : List[str] = pre_norm
SCREAMING_SNAKE_CASE_ : Union[str, Any] = adapter_reduction_factor
SCREAMING_SNAKE_CASE_ : int = adapter_layer_norm
SCREAMING_SNAKE_CASE_ : List[Any] = adapter_reuse_layer_norm
SCREAMING_SNAKE_CASE_ : str = ln_before_adapter
SCREAMING_SNAKE_CASE_ : str = list(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = default_language
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE_ : int = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
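# Hedged sketch of the ONNX input spec built by the property above: multiple-choice
# models carry an extra "choice" axis, so the dynamic-axes mapping depends on the task.
from collections import OrderedDict

def onnx_inputs(task: str) -> OrderedDict:
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

assert list(onnx_inputs("default")) == ["input_ids", "attention_mask"]
assert onnx_inputs("multiple-choice")["input_ids"][1] == "choice"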
| 318
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
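# The import block above follows the standard optional-dependency pattern: probe
# the requirement, raise a sentinel exception when unmet, and fall back to dummy
# objects that error lazily. A generic hedged sketch of the same idea, with a
# hypothetical module name:
try:
    import some_optional_backend  # hypothetical optional dependency
except ImportError:
    some_optional_backend = None

def run_pipeline():
    if some_optional_backend is None:
        raise RuntimeError("Install the optional backend to use this pipeline.")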
| 318
| 1
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCAmelCase_ : str = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
UpperCAmelCase_ : Any = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The lengths of
the references and hypotheses lists need to be the same, so you may need to transpose your references relative to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
UpperCAmelCase_ : List[Any] = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
 ignore_punct (boolean): If `True`, removes punctuation from sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
if version.parse(scb.__version__) < version.parse('''1.4.12'''):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''),
}) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = len(references[0])
if any(len(lowercase_) != references_per_prediction for refs in references):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
SCREAMING_SNAKE_CASE_ : Dict = [[refs[i] for refs in references] for i in range(lowercase_)]
SCREAMING_SNAKE_CASE_ : int = TER(
normalized=lowercase_ , no_punct=lowercase_ , asian_support=lowercase_ , case_sensitive=lowercase_ , )
SCREAMING_SNAKE_CASE_ : Any = sb_ter.corpus_score(lowercase_ , lowercase_)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 318
|
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase_ : List[str] = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _A (__a , __a , __a , __a ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _A (__a ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _A (__a ) -> Matrix | None:
"""simple docstring"""
if location := find_empty_location(__a ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__a , __a , __a , __a ):
SCREAMING_SNAKE_CASE_ : Tuple = digit
if sudoku(__a ) is not None:
return grid
SCREAMING_SNAKE_CASE_ : Any = 0
return None
def _A (__a ) -> None:
"""simple docstring"""
for row in grid:
for cell in row:
print(__a , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
UpperCAmelCase_ : str = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 318
| 1
|
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
UpperCAmelCase_ : List[str] = logging.getLogger(__name__)
UpperCAmelCase_ : List[Any] = """pytorch_model.bin"""
@dataclasses.dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__UpperCamelCase = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
__UpperCamelCase = dataclasses.field(
default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__UpperCamelCase = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
__UpperCamelCase = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
__UpperCamelCase = dataclasses.field(
default=UpperCAmelCase__ , metadata={"help": "A csv or a json file containing the validation data."} )
__UpperCamelCase = dataclasses.field(
default=UpperCAmelCase__ , metadata={"help": "The name of the task to train on."} , )
__UpperCamelCase = dataclasses.field(
default=UpperCAmelCase__ , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__UpperCamelCase = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
__UpperCamelCase = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
__UpperCamelCase = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
__UpperCamelCase = dataclasses.field(
default=1_0 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
__UpperCamelCase = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
__UpperCamelCase = dataclasses.field(
default=UpperCAmelCase__ , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
__UpperCamelCase = dataclasses.field(
default=UpperCAmelCase__ , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
__UpperCamelCase = dataclasses.field(
default=UpperCAmelCase__ , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
__UpperCamelCase = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
__UpperCamelCase = dataclasses.field(
default=1_0_0 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
__UpperCamelCase = dataclasses.field(
default=UpperCAmelCase__ , metadata={"help": "Random seed for initialization."} , )
def _A (__a , __a , __a , __a , __a , __a ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
        SCREAMING_SNAKE_CASE_ : Optional[int] = dataset.filter(lambda example : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(eval_result * len(__a ) )
print(__a )
SCREAMING_SNAKE_CASE_ : List[Any] = dataset.sort('''probability''' , reverse=__a )
SCREAMING_SNAKE_CASE_ : Any = dataset.select(range(__a ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dataset.remove_columns(['''label''', '''probability'''] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dataset.rename_column('''prediction''' , '''label''' )
    SCREAMING_SNAKE_CASE_ : int = dataset.map(lambda example : {"label": id2label[example["label"]]} )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dataset.shuffle(seed=args.seed )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(__a , f'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(__a , index=__a )
else:
dataset.to_json(__a )
def _A (__a , __a , __a , __a , **__a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = STModelArguments(model_name_or_path=__a )
SCREAMING_SNAKE_CASE_ : int = STDataArguments(train_file=__a , infer_file=__a )
SCREAMING_SNAKE_CASE_ : Tuple = STTrainingArguments(output_dir=__a )
SCREAMING_SNAKE_CASE_ : List[str] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__a ).items():
setattr(__a , __a , __a )
for key, value in kwargs.items():
if hasattr(__a , __a ):
setattr(__a , __a , __a )
# Sanity checks
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
SCREAMING_SNAKE_CASE_ : Dict = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
SCREAMING_SNAKE_CASE_ : Tuple = args.train_file
SCREAMING_SNAKE_CASE_ : List[str] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
SCREAMING_SNAKE_CASE_ : int = args.eval_file
for key in data_files:
SCREAMING_SNAKE_CASE_ : Optional[Any] = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], f'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
SCREAMING_SNAKE_CASE_ : List[Any] = extension
else:
            assert extension == args.data_file_extension, f'`{key}_file` should be a {args.data_file_extension} file.'
assert (
args.eval_metric in datasets.list_metrics()
), f'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
SCREAMING_SNAKE_CASE_ : Any = f'{args.output_dir}/self-train_iter-{{}}'.format
SCREAMING_SNAKE_CASE_ : int = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__a )
os.makedirs(__a , exist_ok=__a )
accelerator.wait_for_everyone()
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : List[str] = False
# Show the progress bar
SCREAMING_SNAKE_CASE_ : List[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
SCREAMING_SNAKE_CASE_ : Any = data_dir_format(__a )
assert os.path.exists(__a )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(__a , '''stage-1''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__a , __a ):
arguments_dict.update({key: value} )
SCREAMING_SNAKE_CASE_ : Dict = os.path.join(__a , '''best-checkpoint''' , __a )
if os.path.exists(__a ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , __a , __a , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , __a )
finetune(**__a )
accelerator.wait_for_everyone()
assert os.path.exists(__a )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , __a )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
SCREAMING_SNAKE_CASE_ : int = os.path.join(__a , '''best-checkpoint''' )
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(__a , '''stage-2''' )
# Update arguments_dict
SCREAMING_SNAKE_CASE_ : List[str] = model_path
SCREAMING_SNAKE_CASE_ : int = data_files['''train''']
SCREAMING_SNAKE_CASE_ : str = current_output_dir
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(__a , '''best-checkpoint''' , __a )
if os.path.exists(__a ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , __a , __a , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , __a )
finetune(**__a )
accelerator.wait_for_everyone()
assert os.path.exists(__a )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , __a )
        new_iteration = iteration
SCREAMING_SNAKE_CASE_ : Dict = data_dir_format(iteration + 1 )
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(os.path.join(__a , '''best-checkpoint''' ) )
SCREAMING_SNAKE_CASE_ : int = config.idalabel
SCREAMING_SNAKE_CASE_ : str = os.path.join(__a , '''eval_results_best-checkpoint.json''' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(__a , '''test_results_best-checkpoint.json''' )
assert os.path.exists(__a )
with open(__a , '''r''' ) as f:
            eval_result = float(json.load(f )[args.eval_metric] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(__a , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(__a )
# Loading the dataset from local csv or json files.
SCREAMING_SNAKE_CASE_ : List[Any] = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
SCREAMING_SNAKE_CASE_ : List[Any] = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(__a , exist_ok=__a )
shutil.copy(__a , os.path.join(__a , f'eval_results_iter-{iteration}.json' ) )
if os.path.exists(__a ):
shutil.copy(__a , os.path.join(__a , f'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(__a , __a , __a , __a , __a , __a )
accelerator.wait_for_everyone()
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(__a , f'train_pseudo.{args.data_file_extension}' )
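        # Early-stopping bookkeeping (see block below): keep the iteration with the
        # best eval metric; any round that fails to beat it by more than the
        # threshold consumes patience (ties still update the recorded best), and
        # self-training stops once the patience budget is exhausted.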
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
        logger.info('''Best iteration: %d''' , best_iteration )
        logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , best_eval_result )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__a , f'eval_results_iter-{iteration}.json' ) , os.path.join(__a , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
        logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , eval_result )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__a , f'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(__a , '''eval_results_best-iteration.json''' ) , )
| 318
|
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """simple docstring"""
    # d2d3d4 must be divisible by 2, d3d4d5 by 3, and d4d5d6 by 5
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    # d5d6d7, d6d7d8, d7d8d9 and d8d9d10 must be divisible by 7, 11, 13 and 17
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    """simple docstring"""
    return sum(
        int(''''''.join(map(str , num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
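# Hedged sanity check: Project Euler problem 43 cites 1406357289 as a 0-9
# pandigital number with this substring-divisibility property, so the
# predicate should accept its digit tuple.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))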
if __name__ == "__main__":
print(f'''{solution() = }''')
| 318
| 1
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCAmelCase__ :
'''simple docstring'''
__UpperCamelCase = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''feat_extract.json''')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict())
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 318
|
"""simple docstring"""
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """simple docstring"""
    if fluid_density <= 0:
        raise ValueError('''Impossible fluid density''' )
    if volume < 0:
        raise ValueError('''Impossible Object volume''' )
    if gravity <= 0:
        raise ValueError('''Impossible Gravity''' )
    return fluid_density * gravity * volume
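# Illustrative usage (assumed inputs): the buoyant force on a 0.5 m^3 body
# fully submerged in fresh water (~1000 kg/m^3) under standard gravity is
# 1000 * 9.80665 * 0.5 = 4903.325 N, i.e.
# archimedes_principle(fluid_density=1000, volume=0.5) -> 4903.325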
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 318
| 1
|
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
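# For context, a minimal sketch of what the `get_duration` decorator imported
# above presumably looks like (an assumption -- the real helper lives in
# utils.py and may differ): it times the wrapped call and returns the elapsed
# seconds instead of the function's own result, which is why the benchmark
# assigns `func(dataset, **kwargs)` directly into its results dict.
#
#     import functools
#     import time
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.time()
#             func(*args, **kwargs)
#             return time.time() - start
#         return wrapper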
@get_duration
def read(dataset, length) -> None:
    """simple docstring"""
    for i in range(length):
        _ = dataset[i]
@get_duration
def read_batch(dataset, length, batch_size) -> None:
    """simple docstring"""
    for i in range(0 , len(dataset) , batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset, length, type) -> None:
    """simple docstring"""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset, length, batch_size, type) -> None:
    """simple docstring"""
    with dataset.formatted_as(type=type):
        for i in range(0 , length , batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating() -> None:
    """simple docstring"""
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'''length''': SMALL_TEST}),
        (read, {'''length''': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
        (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
    ]
    functions_shuffled = [
        (read, {'''length''': SMALL_TEST}),
        (read, {'''length''': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
        (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('''generating dataset''' )
        features = datasets.Features(
            {'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={'''list''': (100,)} , )
        print('''first set of iterations''' )
        for func, kwargs in functions:
            print(func.__name__ , str(kwargs) )
            # key results by function name plus kwargs so repeated functions don't collide
            times[func.__name__ + ''' ''' + ''' '''.join(str(v) for v in kwargs.values())] = func(dataset , **kwargs)
        print('''shuffling dataset''' )
        dataset = dataset.shuffle()
        print('''Second set of iterations (after shuffling)''' )
        for func, kwargs in functions_shuffled:
            print('''shuffled ''' , func.__name__ , str(kwargs) )
            times['''shuffled ''' + func.__name__ + ''' ''' + ''' '''.join(str(v) for v in kwargs.values())] = func(dataset , **kwargs)
    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 318
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 318
| 1
|
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = (CMStochasticIterativeScheduler,)
__UpperCamelCase = 1_0
def _SCREAMING_SNAKE_CASE ( self : Dict , **lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
config.update(**lowercase_)
return config
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = 10
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.scheduler_classes[0](**lowercase_)
scheduler.set_timesteps(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.1 * sample
SCREAMING_SNAKE_CASE_ : Any = scheduler.step(lowercase_ , lowercase_ , lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : int = scheduler.step(lowercase_ , lowercase_ , lowercase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : List[str] = scheduler_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = 1
scheduler.set_timesteps(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.timesteps
SCREAMING_SNAKE_CASE_ : Dict = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_model()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowercase_):
# 1. scale model input
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.scale_model_input(lowercase_ , lowercase_)
# 2. predict noise residual
SCREAMING_SNAKE_CASE_ : Dict = model(lowercase_ , lowercase_)
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : List[Any] = pred_prev_sample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : str = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 1_92.76_14) < 1e-2
assert abs(result_mean.item() - 0.25_10) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : Any = [106, 0]
scheduler.set_timesteps(timesteps=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = scheduler.timesteps
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_model()
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE_ : str = scheduler.scale_model_input(lowercase_ , lowercase_)
# 2. predict noise residual
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , lowercase_)
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_).prev_sample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE_ : int = torch.sum(torch.abs(lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 3_47.63_57) < 1e-2
assert abs(result_mean.item() - 0.45_27) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : int = scheduler_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = [39, 30, 12, 15, 0]
with self.assertRaises(lowercase_ , msg='''`timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Any = scheduler_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : Any = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_)
with self.assertRaises(lowercase_ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=lowercase_ , timesteps=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : List[str] = scheduler_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowercase_ , msg=f'`timesteps` must start before `self.config.num_train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=lowercase_)
| 318
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
UpperCAmelCase_ : Any = """examples/"""
UpperCAmelCase_ : Optional[int] = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCAmelCase_ : List[Any] = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCAmelCase_ : Optional[int] = """README.md"""
def update_version_in_file(fname: str, version: str, pattern: str) -> None:
    """simple docstring"""
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )
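# Illustration (hypothetical input line): with version "4.31.0", the "init"
# pattern above rewrites
#     __version__ = "4.30.0.dev0"
# into
#     __version__ = "4.31.0"
# because `replace` has VERSION substituted before the regex substitution runs.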
def update_version_in_examples(version: str) -> None:
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='''examples''' )
def global_version_update(version: str, patch: bool = False) -> None:
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list() -> None:
    """simple docstring"""
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
def get_version() -> packaging.version.Version:
    """simple docstring"""
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work(patch: bool = False) -> None:
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = f'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(f'Which version are you releasing? [{default_version}]' )
    if len(version ) == 0:
        version = default_version
    print(f'Updating version to {version}.' )
    global_version_update(version , patch=patch )
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
        clean_main_ref_in_model_list()
def post_release_work() -> None:
    """simple docstring"""
    current_version = get_version()
    dev_version = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'Which version are we developing now? [{dev_version}]' )
    if len(version ) == 0:
        version = dev_version
    print(f'Updating version to {version}.' )
    global_version_update(version )
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 318
| 1
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__UpperCamelCase = 42
# setable values
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = None
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] , lowercase_ : CommonSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray):
'''simple docstring'''
return cls(common=lowercase_ , init_noise_sigma=lowercase_ , timesteps=lowercase_)
@dataclass
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = 42
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
__UpperCamelCase = 42
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return True
@register_to_config
def __init__( self : Any , lowercase_ : int = 1000 , lowercase_ : float = 0.00_01 , lowercase_ : float = 0.02 , lowercase_ : str = "linear" , lowercase_ : Optional[jnp.ndarray] = None , lowercase_ : str = "fixed_small" , lowercase_ : bool = True , lowercase_ : str = "epsilon" , lowercase_ : jnp.dtype = jnp.floataa , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = dtype
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[CommonSchedulerState] = None):
'''simple docstring'''
if common is None:
SCREAMING_SNAKE_CASE_ : Dict = CommonSchedulerState.create(self)
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(1.0 , dtype=self.dtype)
SCREAMING_SNAKE_CASE_ : Any = jnp.arange(0 , self.config.num_train_timesteps).round()[::-1]
return DDPMSchedulerState.create(
common=lowercase_ , init_noise_sigma=lowercase_ , timesteps=lowercase_ , )
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : DDPMSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : Optional[int] = None):
'''simple docstring'''
return sample
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : DDPMSchedulerState , lowercase_ : int , lowercase_ : Tuple = ()):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ : Tuple = (jnp.arange(0 , lowercase_) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowercase_ , timesteps=lowercase_ , )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : DDPMSchedulerState , lowercase_ : Optional[int] , lowercase_ : Optional[int]=None , lowercase_ : str=None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE_ : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
        # For t > 0, compute predicted variance β̃t (see formulas (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
SCREAMING_SNAKE_CASE_ : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
SCREAMING_SNAKE_CASE_ : Any = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
SCREAMING_SNAKE_CASE_ : Dict = jnp.clip(lowercase_ , a_min=1e-20)
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
SCREAMING_SNAKE_CASE_ : Dict = jnp.log(jnp.clip(lowercase_ , a_min=1e-20))
elif variance_type == "fixed_large":
SCREAMING_SNAKE_CASE_ : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.log(state.common.betas[t])
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
SCREAMING_SNAKE_CASE_ : Any = variance
SCREAMING_SNAKE_CASE_ : Tuple = state.common.betas[t]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (predicted_variance + 1) / 2
SCREAMING_SNAKE_CASE_ : List[str] = frac * max_log + (1 - frac) * min_log
return variance
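    # For reference, the posterior ("fixed_small") variance computed above is
    # formula (7) of the DDPM paper: β̃_t = (1 - ᾱ_{t-1}) / (1 - ᾱ_t) · β_t.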
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : DDPMSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : int , lowercase_ : jnp.ndarray , lowercase_ : Optional[jax.random.KeyArray] = None , lowercase_ : bool = True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = timestep
if key is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.PRNGKey(0)
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.split(lowercase_ , sample.shape[1] , axis=1)
else:
SCREAMING_SNAKE_CASE_ : str = None
# 1. compute alphas, betas
SCREAMING_SNAKE_CASE_ : int = state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE_ : Dict = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
SCREAMING_SNAKE_CASE_ : str = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE_ : str = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE_ : Any = model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
''' for the FlaxDDPMScheduler.''')
# 3. Clip "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.clip(lowercase_ , -1 , 1)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE_ : Tuple = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
SCREAMING_SNAKE_CASE_ : Any = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE_ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
SCREAMING_SNAKE_CASE_ : List[Any] = jax.random.split(lowercase_ , num=1)
SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.random.normal(lowercase_ , shape=model_output.shape , dtype=self.dtype)
return (self._get_variance(lowercase_ , lowercase_ , predicted_variance=lowercase_) ** 0.5) * noise
SCREAMING_SNAKE_CASE_ : List[str] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype))
SCREAMING_SNAKE_CASE_ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=lowercase_ , state=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : DDPMSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , lowercase_ , lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : DDPMSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , lowercase_ , lowercase_ , lowercase_)
def __len__( self : int):
'''simple docstring'''
return self.config.num_train_timesteps
| 318
|
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12) -> jnp.ndarray:
    """simple docstring"""
    norm_emb_1 = jnp.divide(emb_1.T , jnp.clip(jnp.linalg.norm(emb_1 , axis=1 ) , a_min=eps ) ).T
    norm_emb_2 = jnp.divide(emb_2.T , jnp.clip(jnp.linalg.norm(emb_2 , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_1 , norm_emb_2.T )
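# Note: despite its name, this helper returns a cosine *similarity* matrix
# between the row-wise L2-normalized embedding sets, with entries in [-1, 1];
# larger values in the safety checker below mean "closer to a flagged concept".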
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = jnp.floataa
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxCLIPVisionModule(self.config.vision_config)
SCREAMING_SNAKE_CASE_ : Tuple = nn.Dense(self.config.projection_dim , use_bias=lowercase_ , dtype=self.dtype)
SCREAMING_SNAKE_CASE_ : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim))
SCREAMING_SNAKE_CASE_ : Dict = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,))
def __call__( self : Optional[Any] , lowercase_ : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.vision_model(lowercase_)[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.visual_projection(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.special_care_embeds)
SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
SCREAMING_SNAKE_CASE_ : Tuple = 0.0
SCREAMING_SNAKE_CASE_ : Dict = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.round(lowercase_ , 3)
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=lowercase_)
# Use a lower threshold if an image has any special care concept
SCREAMING_SNAKE_CASE_ : Dict = is_special_care * 0.01
SCREAMING_SNAKE_CASE_ : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
SCREAMING_SNAKE_CASE_ : Any = jnp.round(lowercase_ , 3)
SCREAMING_SNAKE_CASE_ : Dict = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = CLIPConfig
__UpperCamelCase = "clip_input"
__UpperCamelCase = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Union[str, Any] , lowercase_ : CLIPConfig , lowercase_ : Optional[Tuple] = None , lowercase_ : int = 0 , lowercase_ : jnp.dtype = jnp.floataa , lowercase_ : bool = True , **lowercase_ : Any , ):
'''simple docstring'''
if input_shape is None:
SCREAMING_SNAKE_CASE_ : List[str] = (1, 224, 224, 3)
SCREAMING_SNAKE_CASE_ : List[Any] = self.module_class(config=lowercase_ , dtype=lowercase_ , **lowercase_)
super().__init__(lowercase_ , lowercase_ , input_shape=lowercase_ , seed=lowercase_ , dtype=lowercase_ , _do_init=_do_init)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : jax.random.KeyArray , lowercase_ : Tuple , lowercase_ : FrozenDict = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = jax.random.normal(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.split(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = {'''params''': params_rng, '''dropout''': dropout_rng}
SCREAMING_SNAKE_CASE_ : List[Any] = self.module.init(lowercase_ , lowercase_)['''params''']
return random_params
def __call__( self : List[Any] , lowercase_ : List[str] , lowercase_ : dict = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = jnp.transpose(lowercase_ , (0, 2, 3, 1))
return self.module.apply(
{'''params''': params or self.params} , jnp.array(lowercase_ , dtype=jnp.floataa) , rngs={} , )
| 318
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = DanceDiffusionPipeline
__UpperCamelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
__UpperCamelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Dict = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowercase_ , use_timestep_embedding=lowercase_ , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = IPNDMScheduler()
SCREAMING_SNAKE_CASE_ : str = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : List[Any] , lowercase_ : Tuple=0):
'''simple docstring'''
if str(lowercase_).startswith('''mps'''):
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.manual_seed(lowercase_)
else:
SCREAMING_SNAKE_CASE_ : Tuple = torch.Generator(device=lowercase_).manual_seed(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : int = DanceDiffusionPipeline(**lowercase_)
SCREAMING_SNAKE_CASE_ : str = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_inputs(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = pipe(**lowercase_)
SCREAMING_SNAKE_CASE_ : Any = output.audios
SCREAMING_SNAKE_CASE_ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
SCREAMING_SNAKE_CASE_ : Any = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00])
assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
@skip_mps
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch_device
SCREAMING_SNAKE_CASE_ : Optional[int] = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''')
SCREAMING_SNAKE_CASE_ : List[Any] = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Dict = pipe(generator=lowercase_ , num_inference_steps=100 , audio_length_in_s=4.0_96)
SCREAMING_SNAKE_CASE_ : Any = output.audios
SCREAMING_SNAKE_CASE_ : str = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20])
assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = torch_device
SCREAMING_SNAKE_CASE_ : str = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE_ : Tuple = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(generator=lowercase_ , num_inference_steps=100 , audio_length_in_s=4.0_96)
SCREAMING_SNAKE_CASE_ : Any = output.audios
SCREAMING_SNAKE_CASE_ : str = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41])
assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 318
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    '''simple docstring'''

    def __init__(self , data):
        '''simple docstring'''
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    """simple docstring"""
    print('''\n********Press N to stop entering at any point of time********\n''' )
    check = input('''Enter the value of the root node: ''' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f'Enter the left node of {node_found.data}: '
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f'Enter the right node of {node_found.data}: '
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise  # unreachable: the loop above always returns the root node
def pre_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end=''',''' )
    pre_order(node.left )
    pre_order(node.right )
def in_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end=''',''' )
    in_order(node.right )
def post_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end=''',''' )
def level_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=''',''' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=''',''' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=''',''' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=''',''' )
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1 , stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end=''',''' )
def prompt(s: str = "" , width: int = 50 , char: str = "*" ) -> str:
    """simple docstring"""
    if not s:
        return "\n" + width * char
    left , extra = divmod(width - len(s ) - 2 , 2 )
    return f'{left * char} {s} {(left + extra) * char}'
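# Example (computed from the function above): prompt("Binary Tree Traversals")
# with the default width of 50 yields 13 stars, the padded title, then 13 more
# stars: "************* Binary Tree Traversals *************".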
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node: TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 318
| 1
|
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """simple docstring"""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    """simple docstring"""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
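# Worked example (see Test 5 below): for pattern "aabaabaaa" the failure array
# is [0, 1, 0, 1, 2, 3, 4, 5, 2] -- entry k is the length of the longest proper
# prefix of pattern[: k + 1] that is also a suffix of it.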
if __name__ == "__main__":
    # Test 1)
    pattern = """abc1abc12"""
    text1 = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    text2 = """alskfjaldsk23adsfabcabc"""
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = """ABABX"""
    text = """ABABZABABYABABX"""
    assert kmp(pattern, text)
    # Test 3)
    pattern = """AAAB"""
    text = """ABAAAAAB"""
    assert kmp(pattern, text)
    # Test 4)
    pattern = """abcdabcy"""
    text = """abcxabcdabxabcdabcdabcy"""
    assert kmp(pattern, text)
    # Test 5)
    pattern = """aabaabaaa"""
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 318
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = "ssube/stable-diffusion-x4-upscaler-onnx"
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any]=0):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_))
SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Tuple = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Tuple = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
SCREAMING_SNAKE_CASE_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**lowercase_).images
SCREAMING_SNAKE_CASE_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : int = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE_ : Optional[int] = False
return options
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128))
# using the PNDM scheduler by default
SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : int = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128))
SCREAMING_SNAKE_CASE_ : Tuple = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''')
SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : int = '''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : int = pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : List[str] = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 318
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : str , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any]):
'''simple docstring'''
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_)
| 318
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
UpperCAmelCase_ : List[Any] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
UpperCAmelCase_ : Optional[int] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1: A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2: The same as Example 1, but also returning the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
UpperCAmelCase_ : Tuple = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=False):
'''simple docstring'''
if return_pvalue:
SCREAMING_SNAKE_CASE_ : int = pearsonr(lowercase_ , lowercase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowercase_ , lowercase_)[0])}
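# Hedged usage sketch: the metric above is a thin wrapper around scipy.stats.pearsonr,
# so the docstring examples can be reproduced with a direct call (same data as the
# docstring, no new values):
if __name__ == "__main__":
coefficient, p_value = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(coefficient, 2))  # -0.74
print(round(p_value, 2))  # 0.15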
| 318
| 1
|
"""simple docstring"""
from math import log2
def _A (__a ) -> int:
"""simple docstring"""
if __a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(__a , float ):
raise TypeError('''Input value must be a \'int\' type''' )
return 0 if (__a == 0) else int(log2(__a & -__a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
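# Hedged worked example: `__a & -__a` isolates the lowest set bit of a two's-complement
# integer, and log2 of that power of two gives its index. For 12 == 0b1100:
# 12 & -12 == 4 == 0b100, so the function returns int(log2(4)) == 2.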
| 318
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : Dict[str, int] , lowercase_ : List[str] , lowercase_ : int = None , lowercase_ : int = None):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : str = pad_token_id
SCREAMING_SNAKE_CASE_ : Optional[int] = max_length
SCREAMING_SNAKE_CASE_ : Dict = vocab
SCREAMING_SNAKE_CASE_ : Dict = merges
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BytePairTokenizer(lowercase_ , lowercase_ , sequence_length=lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : GPTaTokenizer , *lowercase_ : Optional[Any] , **lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = [''' '''.join(m) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE_ : str = tokenizer.get_vocab()
return cls(lowercase_ , lowercase_ , *lowercase_ , **lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : Union[str, os.PathLike] , *lowercase_ : List[str] , **lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ , *lowercase_ , **lowercase_)
return cls.from_tokenizer(lowercase_ , *lowercase_ , **lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : List[Any]):
'''simple docstring'''
return cls(**lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any] , lowercase_ : int = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.tf_tokenizer(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.ones_like(lowercase_)
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = pad_model_inputs(
lowercase_ , max_seq_length=lowercase_ , pad_value=self.pad_token_id)
return {"attention_mask": attention_mask, "input_ids": input_ids}
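# Hedged usage sketch: upstream this layer corresponds to transformers' TFGPT2Tokenizer,
# where the classmethods above are from_tokenizer / from_pretrained / from_config and the
# last method is the Keras call(). An upstream-style invocation (assuming the "gpt2"
# checkpoint is available) would look like:
#
# tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
# batch = tf_tokenizer(tf.constant(["hello world"]))
# print(batch["input_ids"].shape, batch["attention_mask"].shape)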
| 318
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = 42
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self : Union[str, Any] , lowercase_ : int = 65536 , lowercase_ : Optional[int] = None , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 0 , lowercase_ : str = "fourier" , lowercase_ : bool = True , lowercase_ : bool = False , lowercase_ : float = 0.0 , lowercase_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowercase_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowercase_ : Tuple[str] = "UNetMidBlock1D" , lowercase_ : str = None , lowercase_ : Tuple[int] = (32, 32, 64) , lowercase_ : str = None , lowercase_ : int = 8 , lowercase_ : int = 1 , lowercase_ : bool = False , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : List[str] = sample_size
# time
if time_embedding_type == "fourier":
SCREAMING_SNAKE_CASE_ : str = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowercase_ , log=lowercase_ , flip_sin_to_cos=lowercase_)
SCREAMING_SNAKE_CASE_ : int = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowercase_ , downscale_freq_shift=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = block_out_channels[0]
if use_timestep_embedding:
SCREAMING_SNAKE_CASE_ : Dict = block_out_channels[0] * 4
SCREAMING_SNAKE_CASE_ : Dict = TimestepEmbedding(
in_channels=lowercase_ , time_embed_dim=lowercase_ , act_fn=lowercase_ , out_dim=block_out_channels[0] , )
SCREAMING_SNAKE_CASE_ : int = nn.ModuleList([])
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.ModuleList([])
SCREAMING_SNAKE_CASE_ : List[str] = None
# down
SCREAMING_SNAKE_CASE_ : Tuple = in_channels
for i, down_block_type in enumerate(lowercase_):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output_channel
SCREAMING_SNAKE_CASE_ : Tuple = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
SCREAMING_SNAKE_CASE_ : List[str] = i == len(lowercase_) - 1
SCREAMING_SNAKE_CASE_ : Any = get_down_block(
lowercase_ , num_layers=lowercase_ , in_channels=lowercase_ , out_channels=lowercase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowercase_)
# mid
SCREAMING_SNAKE_CASE_ : Dict = get_mid_block(
lowercase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowercase_ , add_downsample=lowercase_ , )
# up
SCREAMING_SNAKE_CASE_ : Optional[int] = list(reversed(lowercase_))
SCREAMING_SNAKE_CASE_ : List[Any] = reversed_block_out_channels[0]
if out_block_type is None:
SCREAMING_SNAKE_CASE_ : Any = out_channels
else:
SCREAMING_SNAKE_CASE_ : List[str] = block_out_channels[0]
for i, up_block_type in enumerate(lowercase_):
SCREAMING_SNAKE_CASE_ : Tuple = output_channel
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
reversed_block_out_channels[i + 1] if i < len(lowercase_) - 1 else final_upsample_channels
)
SCREAMING_SNAKE_CASE_ : Dict = i == len(lowercase_) - 1
SCREAMING_SNAKE_CASE_ : Tuple = get_up_block(
lowercase_ , num_layers=lowercase_ , in_channels=lowercase_ , out_channels=lowercase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowercase_)
SCREAMING_SNAKE_CASE_ : int = output_channel
# out
SCREAMING_SNAKE_CASE_ : Dict = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
SCREAMING_SNAKE_CASE_ : str = get_out_block(
out_block_type=lowercase_ , num_groups_out=lowercase_ , embed_dim=block_out_channels[0] , out_channels=lowercase_ , act_fn=lowercase_ , fc_dim=block_out_channels[-1] // 4 , )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : torch.FloatTensor , lowercase_ : Union[torch.Tensor, float, int] , lowercase_ : bool = True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = timestep
if not torch.is_tensor(lowercase_):
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(lowercase_) and len(timesteps.shape) == 0:
SCREAMING_SNAKE_CASE_ : str = timesteps[None].to(sample.device)
SCREAMING_SNAKE_CASE_ : Dict = self.time_proj(lowercase_)
if self.config.use_timestep_embedding:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.time_mlp(lowercase_)
else:
SCREAMING_SNAKE_CASE_ : int = timestep_embed[..., None]
SCREAMING_SNAKE_CASE_ : int = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
SCREAMING_SNAKE_CASE_ : Dict = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ()
for downsample_block in self.down_blocks:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = downsample_block(hidden_states=lowercase_ , temb=lowercase_)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
SCREAMING_SNAKE_CASE_ : Dict = self.mid_block(lowercase_ , lowercase_)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
SCREAMING_SNAKE_CASE_ : List[str] = down_block_res_samples[-1:]
SCREAMING_SNAKE_CASE_ : int = down_block_res_samples[:-1]
SCREAMING_SNAKE_CASE_ : Optional[int] = upsample_block(lowercase_ , res_hidden_states_tuple=lowercase_ , temb=lowercase_)
# 5. post-process
if self.out_block:
SCREAMING_SNAKE_CASE_ : str = self.out_block(lowercase_ , lowercase_)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowercase_)
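# Hedged usage sketch: upstream this model is diffusers' UNet1DModel. With the defaults
# above (sample_size=65536, in_channels=2, out_channels=2, fourier time embedding), a
# forward pass should take a (batch, channels, length) sample plus a timestep and return
# an output whose `sample` field has the same shape:
#
# model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
# noisy = torch.randn(1, 2, 65536)
# denoised = model(noisy, timestep=10).sample  # expected shape: (1, 2, 65536)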
| 318
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
SCREAMING_SNAKE_CASE_ : Dict = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , lowercase_)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowercase_ : str):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowercase_ : List[Any]):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : str):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
SCREAMING_SNAKE_CASE_ : Dict = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
processor_slow.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
processor_fast.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , lowercase_)
self.assertIsInstance(processor_fast.tokenizer , lowercase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , lowercase_)
self.assertIsInstance(processor_fast.image_processor , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''')
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor(do_normalize=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=lowercase_)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Any = image_processor(lowercase_ , return_tensors='''np''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowercase_ , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : int = processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.batch_decode(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Dict = processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 318
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "transfo-xl"
__UpperCamelCase = ["mems"]
__UpperCamelCase = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[Any] , lowercase_ : Any=267735 , lowercase_ : int=[20000, 40000, 200000] , lowercase_ : Tuple=1024 , lowercase_ : str=1024 , lowercase_ : Dict=16 , lowercase_ : Tuple=64 , lowercase_ : List[Any]=4096 , lowercase_ : Any=4 , lowercase_ : str=False , lowercase_ : Any=18 , lowercase_ : Union[str, Any]=1600 , lowercase_ : Optional[Any]=1000 , lowercase_ : Dict=True , lowercase_ : Optional[Any]=True , lowercase_ : Dict=0 , lowercase_ : Optional[Any]=-1 , lowercase_ : str=True , lowercase_ : List[Any]=0.1 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=True , lowercase_ : Optional[int]="normal" , lowercase_ : Any=0.01 , lowercase_ : int=0.01 , lowercase_ : Any=0.02 , lowercase_ : List[str]=1e-5 , lowercase_ : List[Any]=0 , **lowercase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = []
self.cutoffs.extend(lowercase_)
if proj_share_all_but_first:
SCREAMING_SNAKE_CASE_ : Any = [False] + [True] * len(self.cutoffs)
else:
SCREAMING_SNAKE_CASE_ : List[Any] = [False] + [False] * len(self.cutoffs)
SCREAMING_SNAKE_CASE_ : Optional[Any] = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = d_embed
SCREAMING_SNAKE_CASE_ : str = d_head
SCREAMING_SNAKE_CASE_ : Any = d_inner
SCREAMING_SNAKE_CASE_ : int = div_val
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pre_lnorm
SCREAMING_SNAKE_CASE_ : Any = n_layer
SCREAMING_SNAKE_CASE_ : List[Any] = n_head
SCREAMING_SNAKE_CASE_ : Optional[int] = mem_len
SCREAMING_SNAKE_CASE_ : Optional[int] = same_length
SCREAMING_SNAKE_CASE_ : int = attn_type
SCREAMING_SNAKE_CASE_ : int = clamp_len
SCREAMING_SNAKE_CASE_ : Tuple = sample_softmax
SCREAMING_SNAKE_CASE_ : Tuple = adaptive
SCREAMING_SNAKE_CASE_ : Dict = dropout
SCREAMING_SNAKE_CASE_ : List[str] = dropatt
SCREAMING_SNAKE_CASE_ : Tuple = untie_r
SCREAMING_SNAKE_CASE_ : Optional[int] = init
SCREAMING_SNAKE_CASE_ : int = init_range
SCREAMING_SNAKE_CASE_ : Dict = proj_init_std
SCREAMING_SNAKE_CASE_ : int = init_std
SCREAMING_SNAKE_CASE_ : str = layer_norm_epsilon
super().__init__(eos_token_id=lowercase_ , **lowercase_)
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.')
return -1
@max_position_embeddings.setter
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : List[Any]):
'''simple docstring'''
raise NotImplementedError(
F'The model {self.model_type} is one of the few models that has no sequence length limit.')
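# Hedged sketch: the attribute_map above aliases generic config names onto the
# Transformer-XL-specific ones, so with the defaults (upstream: TransfoXLConfig):
#
# config = TransfoXLConfig()
# assert config.hidden_size == config.d_model == 1024
# assert config.num_hidden_layers == config.n_layer == 18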
| 318
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "rwkv"
__UpperCamelCase = {"max_position_embeddings": "context_length"}
def __init__( self : Union[str, Any] , lowercase_ : Any=50277 , lowercase_ : str=1024 , lowercase_ : List[str]=4096 , lowercase_ : Optional[Any]=32 , lowercase_ : Any=None , lowercase_ : Any=None , lowercase_ : List[Any]=1e-5 , lowercase_ : Union[str, Any]=0 , lowercase_ : Union[str, Any]=0 , lowercase_ : int=6 , lowercase_ : Tuple=False , lowercase_ : Any=True , **lowercase_ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Any = context_length
SCREAMING_SNAKE_CASE_ : int = hidden_size
SCREAMING_SNAKE_CASE_ : int = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size
SCREAMING_SNAKE_CASE_ : int = intermediate_size if intermediate_size is not None else 4 * hidden_size
SCREAMING_SNAKE_CASE_ : int = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_every
SCREAMING_SNAKE_CASE_ : Dict = use_cache
SCREAMING_SNAKE_CASE_ : Dict = bos_token_id
SCREAMING_SNAKE_CASE_ : Any = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
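# Hedged sketch: `max_position_embeddings` is an alias for `context_length` via the
# attribute_map above (upstream: RwkvConfig), so with the defaults:
#
# config = RwkvConfig()
# assert config.max_position_embeddings == config.context_length == 1024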
| 318
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase_ : List[Any] = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
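# Hedged sketch of the lazy-import behavior: outside TYPE_CHECKING the module is replaced
# by a _LazyModule, so each submodule in _import_structure is imported only on first
# attribute access, e.g.:
#
# from transformers import onnx
# config_cls = onnx.OnnxConfig  # first access triggers the import of the config submodule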
| 318
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3_1_4_4_5_9_8  # J / (mol * K)
def rms_speed_of_molecule (temperature , molar_mass ) -> float:
"""simple docstring"""
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
temperature = 300  # K
molar_mass = 0.0_2_8  # kg/mol for N2; the original 28 was off by the g-to-kg factor
vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
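# Hedged check: with the corrected units above (M = 0.028 kg/mol for N2, T = 300 K),
# v_rms = (3 * 8.3144598 * 300 / 0.028) ** 0.5 ≈ 516.96 m/s, the textbook value for
# nitrogen at room temperature.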
| 318
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = AudioLDMPipeline
__UpperCamelCase = TEXT_TO_AUDIO_PARAMS
__UpperCamelCase = TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowercase_ , )
SCREAMING_SNAKE_CASE_ : List[str] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : str = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
SCREAMING_SNAKE_CASE_ : List[Any] = ClapTextModelWithProjection(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77)
SCREAMING_SNAKE_CASE_ : Any = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowercase_ , )
SCREAMING_SNAKE_CASE_ : List[str] = SpeechTaHifiGan(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : List[str] , lowercase_ : int=0):
'''simple docstring'''
if str(lowercase_).startswith('''mps'''):
SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(lowercase_)
else:
SCREAMING_SNAKE_CASE_ : Any = torch.Generator(device=lowercase_).manual_seed(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Optional[int] = AudioLDMPipeline(**lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = audioldm_pipe.to(lowercase_)
audioldm_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : int = self.get_dummy_inputs(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = audioldm_pipe(**lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase_) == 256
SCREAMING_SNAKE_CASE_ : Tuple = audio[:10]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33])
assert np.abs(audio_slice - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AudioLDMPipeline(**lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = audioldm_pipe.to(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = audioldm_pipe.to(lowercase_)
audioldm_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : str = self.get_dummy_inputs(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = 3 * [inputs['''prompt''']]
# forward
SCREAMING_SNAKE_CASE_ : Tuple = audioldm_pipe(**lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output.audios[0]
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = 3 * [inputs.pop('''prompt''')]
SCREAMING_SNAKE_CASE_ : Any = audioldm_pipe.tokenizer(
lowercase_ , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowercase_ , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ : Any = text_inputs['''input_ids'''].to(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = audioldm_pipe.text_encoder(
lowercase_ , )
SCREAMING_SNAKE_CASE_ : Dict = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE_ : str = F.normalize(lowercase_ , dim=-1)
SCREAMING_SNAKE_CASE_ : int = prompt_embeds
# forward
SCREAMING_SNAKE_CASE_ : List[Any] = audioldm_pipe(**lowercase_)
SCREAMING_SNAKE_CASE_ : Any = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : str = AudioLDMPipeline(**lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = audioldm_pipe.to(lowercase_)
SCREAMING_SNAKE_CASE_ : str = audioldm_pipe.to(lowercase_)
audioldm_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : int = self.get_dummy_inputs(lowercase_)
SCREAMING_SNAKE_CASE_ : str = 3 * ['''this is a negative prompt''']
SCREAMING_SNAKE_CASE_ : int = negative_prompt
SCREAMING_SNAKE_CASE_ : str = 3 * [inputs['''prompt''']]
# forward
SCREAMING_SNAKE_CASE_ : Union[str, Any] = audioldm_pipe(**lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = output.audios[0]
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = 3 * [inputs.pop('''prompt''')]
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE_ : str = audioldm_pipe.tokenizer(
p , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowercase_ , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ : List[str] = text_inputs['''input_ids'''].to(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = audioldm_pipe.text_encoder(
lowercase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE_ : Tuple = F.normalize(lowercase_ , dim=-1)
embeds.append(lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = embeds
# forward
SCREAMING_SNAKE_CASE_ : List[str] = audioldm_pipe(**lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = PNDMScheduler(skip_prk_steps=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = AudioLDMPipeline(**lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = audioldm_pipe.to(lowercase_)
audioldm_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_inputs(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = '''egg cracking'''
SCREAMING_SNAKE_CASE_ : Tuple = audioldm_pipe(**lowercase_ , negative_prompt=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase_) == 256
SCREAMING_SNAKE_CASE_ : List[str] = audio[:10]
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32])
assert np.abs(audio_slice - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : int = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : int = PNDMScheduler(skip_prk_steps=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = AudioLDMPipeline(**lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = audioldm_pipe.to(lowercase_)
audioldm_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = audioldm_pipe(lowercase_ , num_inference_steps=2).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : List[str] = audioldm_pipe(lowercase_ , num_inference_steps=2 , num_waveforms_per_prompt=lowercase_).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE_ : Tuple = 2
SCREAMING_SNAKE_CASE_ : Tuple = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowercase_).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Optional[Any] = AudioLDMPipeline(**lowercase_)
SCREAMING_SNAKE_CASE_ : int = audioldm_pipe.to(lowercase_)
audioldm_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = audioldm_pipe(audio_length_in_s=0.0_16 , **lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(lowercase_) / vocoder_sampling_rate == 0.0_16
SCREAMING_SNAKE_CASE_ : List[str] = audioldm_pipe(audio_length_in_s=0.0_32 , **lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = output.audios[0]
assert audio.ndim == 1
assert len(lowercase_) / vocoder_sampling_rate == 0.0_32
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Tuple = AudioLDMPipeline(**lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = audioldm_pipe.to(lowercase_)
audioldm_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = ['''hey''']
SCREAMING_SNAKE_CASE_ : Dict = audioldm_pipe(lowercase_ , num_inference_steps=1)
SCREAMING_SNAKE_CASE_ : str = output.audios.shape
assert audio_shape == (1, 256)
SCREAMING_SNAKE_CASE_ : List[Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE_ : Tuple = SpeechTaHifiGan(lowercase_).to(lowercase_)
SCREAMING_SNAKE_CASE_ : str = audioldm_pipe(lowercase_ , num_inference_steps=1)
SCREAMING_SNAKE_CASE_ : str = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowercase_)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase_)
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple="cpu" , lowercase_ : Optional[int]=torch.floataa , lowercase_ : Any=0):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Generator(device=lowercase_).manual_seed(lowercase_)
SCREAMING_SNAKE_CASE_ : str = np.random.RandomState(lowercase_).standard_normal((1, 8, 128, 16))
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.from_numpy(lowercase_).to(device=lowercase_ , dtype=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = audioldm_pipe.to(lowercase_)
audioldm_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_inputs(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = 25
SCREAMING_SNAKE_CASE_ : Dict = audioldm_pipe(**lowercase_).audios[0]
assert audio.ndim == 1
assert len(lowercase_) == 81920
SCREAMING_SNAKE_CASE_ : Optional[int] = audio[77230:77240]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15])
SCREAMING_SNAKE_CASE_ : Dict = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1e-2
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''')
SCREAMING_SNAKE_CASE_ : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
SCREAMING_SNAKE_CASE_ : Dict = audioldm_pipe.to(lowercase_)
audioldm_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_inputs(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = audioldm_pipe(**lowercase_).audios[0]
assert audio.ndim == 1
assert len(lowercase_) == 81920
SCREAMING_SNAKE_CASE_ : Dict = audio[27780:27790]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12])
SCREAMING_SNAKE_CASE_ : Any = np.abs(expected_slice - audio_slice).max()
assert max_diff < 3e-2
| 318
|
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies
def __iter__( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
for task in range(self.n_tasks):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''')
for task in range(self.n_tasks):
for _ in range(self.n_copies):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = start_length
SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
SCREAMING_SNAKE_CASE_ : Tuple = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
return all(lowercase_)
def remove_last_block (__a ) -> str:
"""simple docstring"""
string_list = re.split('''(%s)''' % '''|'''.join(UpperCAmelCase_ ) , __a )
# last string should be ""
return "".join(string_list[:-2] )
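# Hedged behavior sketch: splitting on the stop strings defined at the top of this file
# keeps everything before the last stop marker, e.g.
#
# remove_last_block("    return x\n\nif __name__ == '__main__':\n    pass")
# # -> "    return x\n"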
def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__a ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1]
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes(
__a , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__a , __a ):
gen_token_dict[task].append(__a )
SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
code_gens[task].append(remove_last_block(__a ) )
return code_gens
def _A () -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(HumanEvalArguments )
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE_ : str = '''false'''
if args.num_workers is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
set_seed(args.seed , device_specific=__a )
# Load model and tokenizer
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE_ : List[str] = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' )
SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' )
SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a )
# note: args.batch_size is actually num_return_sequences per prompt, not the dataloader batch size (which is fixed to 1 below)
SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a )
SCREAMING_SNAKE_CASE_ : List[Any] = complete_code(
__a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE_ : int = []
for task in tqdm(range(__a ) ):
SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test''']
SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})'
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute(
references=__a , predictions=__a , num_workers=args.num_workers )
print(f'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__a , __a )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
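# Hedged invocation sketch: the flags below mirror the attributes read from `args` in
# main(); the HumanEvalArguments dataclass itself lives in arguments.py and is not shown
# here, and the checkpoint name is illustrative only:
#
# accelerate launch human_eval.py \
#     --model_ckpt codeparrot/codeparrot \
#     --do_sample True --temperature 0.2 --top_p 0.95 --n_samples 200 --batch_size 10 \
#     --HF_ALLOW_CODE_EVAL "1" --output_file eval_results.json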
| 318
| 1
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def _A (__a ) -> List[Any]:
"""simple docstring"""
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
SCREAMING_SNAKE_CASE_ : Optional[int] = model_type_to_module_name(__a )
SCREAMING_SNAKE_CASE_ : List[str] = importlib.import_module(f'.{module_name}' , '''transformers.models''' )
try:
return getattr(__a , __a )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(__a , '''__name__''' , __a ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
SCREAMING_SNAKE_CASE_ : Optional[Any] = importlib.import_module('''transformers''' )
if hasattr(__a , __a ):
return getattr(__a , __a )
return None
def _A (__a , __a = None , __a = False , __a = False , __a = None , __a = None , __a = None , __a = False , **__a , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = get_file_from_repo(
__a , __a , cache_dir=__a , force_download=__a , resume_download=__a , proxies=__a , use_auth_token=__a , revision=__a , local_files_only=__a , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(__a , encoding='''utf-8''' ) as reader:
return json.load(__a )
class AutoFeatureExtractor:
    """simple docstring"""

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """simple docstring"""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")

    @staticmethod
    def register(config_class, feature_extractor_class):
        """simple docstring"""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
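

# A minimal, hedged usage sketch for the Auto class above. The checkpoint id is
# illustrative; any repo whose preprocessor config names a feature extractor works.
if __name__ == "__main__":
    extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
    print(type(extractor).__name__)  # expected: Wav2Vec2FeatureExtractor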
| 318
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs, ):
        """simple docstring"""
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        """simple docstring"""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
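

# Hedged usage sketch for the processor above with dummy inputs. The checkpoint id
# "ZinengTang/tvlt-base" and the 44.1 kHz rate are assumptions based on the public
# TVLT release, not something this file guarantees.
if __name__ == "__main__":
    import numpy as np

    processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
    video = list(np.random.rand(8, 224, 224, 3))  # 8 dummy RGB frames
    audio = np.random.rand(2 * 44100)             # ~2 s of dummy mono audio
    inputs = processor(images=video, audio=audio, sampling_rate=44100)
    print(sorted(inputs.keys()))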
| 318
| 1
|
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 318
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """simple docstring"""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
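

# Hedged usage sketch; the checkpoint id "microsoft/speecht5_tts" is an assumption
# (any SpeechT5 checkpoint with a matching tokenizer/feature extractor would do):
if __name__ == "__main__":
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")
    print(inputs["input_ids"].shape)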
| 318
| 1
|
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """simple docstring"""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char: str) -> bool:
    """simple docstring"""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
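

# Quick illustration of the ranges above:
#   is_punctuation("!") -> True, is_punctuation(",") -> True, is_punctuation("a") -> False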
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """simple docstring"""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        """simple docstring"""
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt" if labels is None else None, )
        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
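

# Quick, self-contained sanity check for padding_tensor (illustrative values only):
if __name__ == "__main__":
    padded = padding_tensor([[1, 2], [3]], -100, "right", 4)
    assert padded == [[1, 2, -100, -100], [3, -100, -100, -100]]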
| 318
|
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """simple docstring"""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    """simple docstring"""

    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs, )

    def read(self):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset


class ParquetDatasetWriter:
    """simple docstring"""

    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs, ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        """simple docstring"""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """simple docstring"""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format", ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None, )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
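

# Hedged round-trip sketch: these classes back the public `Dataset.to_parquet` /
# `Dataset.from_parquet` helpers; the file name below is illustrative.
if __name__ == "__main__":
    ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
    n_bytes = ParquetDatasetWriter(ds, "demo.parquet").write()
    print(f"wrote {n_bytes} bytes")
    print(ParquetDatasetReader("demo.parquet").read())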
| 318
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"""openmmlab/upernet-convnext-tiny""",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """simple docstring"""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], padding: Union[int, Tuple[int, int], str] = 0, bias: bool = False, dilation: Union[int, Tuple[int, int]] = 1, ):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation, )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    """simple docstring"""

    def __init__(self, pool_scale: int, in_channels: int, channels: int):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """simple docstring"""

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners)
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """simple docstring"""

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners, )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1, )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1, )

    def init_weights(self):
        """simple docstring"""
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """simple docstring"""
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        """simple docstring"""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        """simple docstring"""
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners)
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners)
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """simple docstring"""

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2)

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        """simple docstring"""
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """simple docstring"""
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        """simple docstring"""
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    """simple docstring"""

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """simple docstring"""
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """simple docstring"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        """simple docstring"""
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UPERNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    """simple docstring"""

    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SemanticSegmenterOutput]:
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions)
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
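

# Hedged inference sketch using the checkpoint from the archive list above and a
# random pixel batch (shapes only; not a meaningful segmentation):
if __name__ == "__main__":
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 512, 512)).logits
    print(logits.shape)  # (batch_size, num_labels, height, width)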
| 318
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """simple docstring"""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """simple docstring"""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    """simple docstring"""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """simple docstring"""
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
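    # Example invocation (script and output names are assumptions, hedged):
    #   python convert_musicgen_transformers.py --checkpoint small \
    #       --pytorch_dump_folder ./musicgen-small --device cpu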
| 318
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
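
# With the lazy module installed in sys.modules, attribute access triggers the real
# import on first use, e.g. (hedged sketch):
#   from transformers.models.mra import MraConfig  # resolves lazily via _LazyModule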
| 318
|
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """simple docstring"""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """simple docstring"""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
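    # Tiny hedged sanity check: dilating a single centre pixel with a cross-shaped
    # 3x3 kernel grows it into the kernel's own cross shape:
    #   dot = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
    #   cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    #   assert (dilation(dot, cross) == cross).all()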
| 318
| 1
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
UpperCAmelCase_ : List[str] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
UpperCAmelCase_ : Optional[int] = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
UpperCAmelCase_ : Tuple = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    """simple docstring"""

    def _info(self):
        """simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string'''),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''')),
},
'''references''': {
'''id''': datasets.Value('''string'''),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string'''),
'''answer_start''': datasets.Value('''int32'''),
}),
},
}) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
    def _compute(self, predictions, references):
        """simple docstring"""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
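

# Hedged usage sketch mirroring the docstring example above ("cuad" resolves via the
# datasets metrics loader; ids and texts are illustrative):
if __name__ == "__main__":
    cuad_metric = datasets.load_metric("cuad")
    predictions = [{"prediction_text": ["The seller:"], "id": "example-id"}]
    references = [{"answers": {"answer_start": [143], "text": ["The seller:"]}, "id": "example-id"}]
    print(cuad_metric.compute(predictions=predictions, references=references))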
| 318
|
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """simple docstring"""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each character in the input strings, increment the count for the
    # character in first_str and decrement it for the character in second_str
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
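

# Illustrative results (a hedged sketch of the usual doctest examples):
#   check_anagrams("Silent", "Listen")                     -> True
#   check_anagrams("This is a string", "Is this a string") -> True
#   check_anagrams("There", "Their")                       -> False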
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 318
| 1
|
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_determinstic():
    """simple docstring"""
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    """simple docstring"""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    """simple docstring"""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    """simple docstring"""
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    """simple docstring"""
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    """simple docstring"""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    no_aggregation_metrics = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(no_aggregation_metrics, defaultdict)
| 318
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """simple docstring"""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others.")
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    """simple docstring"""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
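    # Typical invocations (script path as in the transformers repo is an assumption):
    #   python utils/check_doc_toc.py                      # error out if the model ToC is unsorted
    #   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite it in sorted order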
| 318
| 1
|