| Column | Type | Values |
|---|---|---|
| code | string | lengths 82 – 53.2k |
| code_codestyle | int64 | 0 – 721 |
| style_context | string | lengths 91 – 41.9k |
| style_context_codestyle | int64 | 0 – 699 |
| label | int64 | 0 – 1 |
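The rows below are flattened out of this table: each `code` / `style_context` cell is a full Python source file, and the bracketed integer annotations between them are the corresponding codestyle ids and labels. A minimal sketch of how such a split might be inspected with the `datasets` library; the repository id below is a placeholder, not the real dataset name:

```python
from datasets import load_dataset

# Hypothetical repository id; substitute the actual dataset path.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]), row["code_codestyle"])
print(len(row["style_context"]), row["style_context_codestyle"])
# In the rows shown below, label == 1 exactly when the two codestyle ids match.
print(row["label"])
```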
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFEfficientFormerForImageClassification,
        TFEfficientFormerForImageClassificationWithTeacher,
        TFEfficientFormerModel,
    )
    from transformers.models.efficientformer.modeling_tf_efficientformer import (
        TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_vision_available():
    from PIL import Image

    from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    # Identifier names in this file were obfuscated in the dump and are restored
    # on a best-effort basis; in particular the name of the `dim` parameter is a guess.
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        num_channels: int = 3,
        dim: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        embed_dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    # The boolean flag names below were obfuscated in the dump; these four are the
    # standard TFModelTesterMixin names, restored as an assumption. A fifth flag
    # in the original could not be recovered.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)

                seq_length = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
[code_codestyle: 167]
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
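The `_LazyModule` assignment above defers the heavy `modeling_git` import until an attribute is first accessed. A minimal sketch of the same idea; this imitates the pattern and assumes nothing about transformers' actual implementation beyond the import-structure dict:

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Toy stand-in for transformers' _LazyModule: resolve names on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value
```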
[style_context_codestyle: 3 · label: 0]
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
[code_codestyle: 721]
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    # Method names were obfuscated in the dump; the descriptive names below are
    # a best-effort restoration.
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story with no highlights yields an empty list of summary lines."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty collections of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
[style_context_codestyle: 236 · label: 0]
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
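The return codes of `check_circuit_or_path` follow the classical degree criterion: a connected undirected graph has an Euler cycle when every vertex has even degree (code 1), an Euler path when exactly two vertices have odd degree (code 2), and neither otherwise (code 3). A quick standalone sanity check of that parity count on the first demo graph:

```python
g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
odd = [v for v, nbrs in g1.items() if len(nbrs) % 2 == 1]
print(odd)  # [1, 5] -> exactly two odd-degree vertices, so g1 has an Euler path
```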
[code_codestyle: 460]
import argparse
import json
import os
from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def convert_tf_gptsan_to_pt(args):
    # Variable names were obfuscated in the dump and are restored on a best-effort basis.
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            # the dtype was obfuscated as "floataa" in the dump; float16 is assumed here
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-TensorFlow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-TensorFlow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
[style_context_codestyle: 460 · label: 1]
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
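A minimal usage sketch for this helper, assuming it is exported as `deprecate` the way diffusers exposes it; the function name `set_strength` and the version string are illustrative, and the version must lie in the future relative to the installed release or `deprecate` raises:

```python
# Inside a library function that renamed its `scale` argument to `strength`:
def set_strength(strength=None, **kwargs):
    # Pops the old kwarg if it was passed, emits a FutureWarning, and returns its value.
    scale = deprecate("scale", "1.0.0", "Use `strength` instead.", take_from=kwargs)
    return strength if strength is not None else scale
```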
[code_codestyle: 702]
from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
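As a worked check of the restored functions above: 145 = 1! + 4! + 5! = 1 + 24 + 120, and 40585 is the only other such number. The loop bound 7 · 9! + 1 suffices because an 8-digit number's digit factorials sum to at most 8 · 9! = 2,903,040, which has only 7 digits.

```python
assert sum_of_digit_factorial(145) == 145
assert sum_of_digit_factorial(40585) == 40585
assert solution() == 145 + 40585  # 40730, the Project Euler 34 answer
```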
[style_context_codestyle: 323 · label: 0]
"""simple docstring"""
from __future__ import annotations
def A_ ( snake_case__ ) -> bool:
_UpperCamelCase :List[str] = str(snake_case_ )
return len(snake_case_ ) == 9 and set(snake_case_ ) == set('''123456789''' )
def A_ ( ) -> int | None:
for base_num in range(99_99 , 49_99 , -1 ):
_UpperCamelCase :Tuple = 10_00_02 * base_num
if is_9_pandigital(snake_case_ ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
_UpperCamelCase :int = 1_00_20_03 * base_num
if is_9_pandigital(snake_case_ ):
return candidate
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
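The magic multipliers encode the concatenated product directly: for a 4-digit base `n`, `100002 * n == n * 100000 + 2 * n`, i.e. the digits of `n` followed by the digits of `2n` (five digits for `n >= 5000`); `1002003 * n` does the same for a 3-digit base with multipliers (1, 2, 3). The classic small example is 192: 192 × (1, 2, 3) concatenates to 192384576. A quick check against the restored `is_9_pandigital` above:

```python
n = 9327
assert 100002 * n == int(str(n) + str(2 * n))  # concat(n, 2n)
assert is_9_pandigital(192384576)  # 192, 384, 576 -> all nine digits exactly once
```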
[code_codestyle: 355]
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
[style_context_codestyle: 416 · label: 0]
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
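The `Counter` one-liner relies on the fact that a multiset of letters can be rearranged into a palindrome iff at most one letter has odd multiplicity; each `c % 2` contributes 1 per odd count. For example:

```python
from collections import Counter

print(sum(c % 2 for c in Counter("carrace").values()) < 2)  # True: "racecar"
print(sum(c % 2 for c in Counter("ab").values()) < 2)       # False: two odd counts
```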
[code_codestyle: 607]
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


# Class, method, and constant names below were obfuscated in the dump and are
# restored on a best-effort basis.
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
[style_context_codestyle: 607 · label: 1]
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
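For the demo coefficients, the discriminant is 6² − 4·5·1 = 16, so the roots are (−6 ± 4)/10, i.e. −0.2 and −1.0; using `cmath.sqrt` keeps the same formula valid when the discriminant is negative. A quick check against the restored function:

```python
print(quadratic_roots(a=5, b=6, c=1))  # (-0.2, -1.0)
print(quadratic_roots(a=1, b=0, c=1))  # (1j, -1j) -- complex conjugate pair
```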
[code_codestyle: 26]
# NOTE: identifier names in this file were obfuscated in the dump and are
# restored on a best-effort basis (e.g. "elia" -> "eli5", "sas" -> "s2s").
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)


st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
[style_context_codestyle: 26 · label: 1]
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
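A short sketch of how the backbone bookkeeping at the end of `__init__` behaves, assuming the restored class above matches transformers' `FocalNetConfig`: with the default four-stage depths, `stage_names` becomes `["stem", "stage1", ..., "stage4"]`, and `out_features` is aligned against it.

```python
config = FocalNetConfig(out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.out_indices)   # [2, 4] -- positions within stage_names
```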
[code_codestyle: 382]
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""MobileViTFeatureExtractor"""]
a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
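

# For reference, a tiny standalone sketch (hypothetical code, not part of
# transformers) of the lazy-import trick `_LazyModule` implements: attribute
# access resolves the owning submodule on demand, so importing the package
# stays cheap until a heavyweight symbol is actually used.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported class name to the submodule that defines it
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, attr):
        # import the real submodule only on first access
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)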
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
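
# Example invocation (a sketch; the file names below are placeholders for a
# locally downloaded fairseq checkpoint and SentencePiece model, not files
# shipped with this script):
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf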
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
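

# A minimal usage sketch; the derived `hidden_size` follows the formula in
# __init__ (embed_dim * 2 ** (num_stages - 1)):
if __name__ == "__main__":
    config = DonutSwinConfig()
    print(config.hidden_size)  # 96 * 2 ** 3 == 768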
import os
def solution() -> int:
    """Find the maximum total from top to bottom of the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Dynamic programming: each entry accumulates the best path sum from the top.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
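
# Hand-checkable illustration of the same bottom-up accumulation (a sketch,
# independent of triangle.txt): for the triangle [[3], [7, 4], [2, 4, 6]] the
# best path is 3 -> 7 -> 4, so the answer is 14.
#
#     rows = [[3], [7, 4], [2, 4, 6]]
#     for i in range(1, len(rows)):
#         for j in range(len(rows[i])):
#             up = rows[i - 1][j] if j != len(rows[i - 1]) else 0
#             diag = rows[i - 1][j - 1] if j > 0 else 0
#             rows[i][j] += max(up, diag)
#     assert max(rows[-1]) == 14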
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards
        # breaking, which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                up_block = FlaxUpBlock2D(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype)

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
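

# A shape-check sketch for the module above (illustrative; the tiny channel
# widths and latent size are arbitrary, not a trained configuration):
if __name__ == "__main__":
    unet = FlaxUNet2DConditionModel(sample_size=8, block_out_channels=(32, 64, 64, 64))
    params = unet.init_weights(jax.random.PRNGKey(0))
    sample = jnp.zeros((1, 4, 8, 8), dtype=jnp.float32)
    encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)
    out = unet.apply({"params": params}, sample, jnp.array([1], dtype=jnp.int32), encoder_hidden_states)
    print(out.sample.shape)  # (1, 4, 8, 8)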
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count scraped from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2_018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
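
# Google Scholar throttles anonymous clients aggressively; a lightly hardened
# variant (a sketch, the User-Agent value is an arbitrary illustrative string)
# would identify itself and check the HTTP status before parsing:
#
#     response = requests.get(
#         "https://scholar.google.com/scholar_lookup",
#         params=params,
#         headers={"User-Agent": "Mozilla/5.0 (citation-count script)"},
#         timeout=10,
#     )
#     response.raise_for_status()
#     soup = BeautifulSoup(response.content, "html.parser")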
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A ( self ) -> Union[str, Any]:
a_ : Tuple = self.dummy_cond_unet
a_ : int = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
a_ : Tuple = self.dummy_vae
a_ : Optional[Any] = self.dummy_text_encoder
a_ : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
a_ : Union[str, Any] = unet.half()
a_ : Optional[Any] = vae.half()
a_ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
a_ : Tuple = StableDiffusionPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
a_ : Optional[int] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
a_ : int = "A painting of a squirrel eating a burger"
a_ : Tuple = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe([prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe([prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe([prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe([prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe([prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe([prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
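
# The fast tests above rely on a reproducibility pattern worth calling out
# (a sketch): re-seeding a CPU `torch.Generator` makes two pipeline calls
# bit-identical, which is what allows asserting on fixed image slices.
#
#     generator = torch.Generator(device="cpu").manual_seed(0)
#     images_a = sd_pipe([prompt], generator=generator, num_inference_steps=2, output_type="np").images
#     generator = torch.Generator(device="cpu").manual_seed(0)
#     images_b = sd_pipe([prompt], generator=generator, num_inference_steps=2, output_type="np").images
#     assert (images_a == images_b).all()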
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
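

# Illustration (a sketch) of the WordPiece behavior the full-tokenizer test
# pins down, using the toy vocab written in setUp:
#
#     tokenizer = LayoutLMTokenizer(vocab_file)
#     tokenizer.tokenize("UNwant\u00e9d,running")
#     # -> ['un', '##want', '##ed', ',', 'runn', '##ing']
#     # ids [7, 4, 5, 10, 8, 9] are simply the row positions in the vocab file.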
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
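

# Note (sketch): under `TYPE_CHECKING` the real submodules are imported so
# static analyzers see concrete symbols, while at runtime the `_LazyModule`
# indirection defers the torch-heavy import until first use, e.g.
#
#     from transformers import GPTBigCodeConfig  # cheap
#     from transformers import GPTBigCodeModel   # triggers the lazy torch import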
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
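
# Round-trip sketch (not part of the CLI above): the 2x2 key below has
# determinant 2*6 - 5*1 = 7, which is coprime with 36, so it is a valid key.
#
#     cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     assert cipher.decrypt(cipher.encrypt("HELLOWORLD")) == "HELLOWORLD"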
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_snake_case = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs
def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
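

# Sketch of intended usage (the tool name and test class below are
# hypothetical, purely for illustration of how the mixin plugs in):
#
#     from transformers import load_tool
#
#     @is_tool_test
#     class TextSummarizationToolTester(ToolTesterMixin, unittest.TestCase):
#         def setUp(self):
#             self.tool = load_tool("summarization")  # illustrative task name
#             self.tool.setup()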
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : torch.FloatTensor
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
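        # Explanatory note (added): the block below precomputes an additive causal
        # attention mask once at init time - a strictly upper-triangular matrix of large
        # negative values, so each position can only attend to itself and earlier
        # positions - and registers it as a non-persistent buffer.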
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
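        # Note (added): the two zero-initialized parameters above hold the CLIP latent
        # statistics (mean and std); `post_process_latents` at the end of this class
        # uses them to un-normalize the predicted image embeddings.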
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
self.set_attn_processor(AttnProcessor() )
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    """Configuration class to store the configuration of a LeViT model."""

    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
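# Illustrative usage sketch (added; the snippet string is hypothetical):
#
#   tokens = list(get_tokens("def add(a, b):\n    return a + b"))
#   min_hash = get_min_hash(tokens)  # None here, since fewer than MIN_NUM_TOKENS tokens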
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index; close matches are grouped into duplicate clusters."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset using MinHashLSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
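# Worked example (added): for code1 tokens {a, b, c} and code2 tokens {a, b, d},
# jaccard_similarity returns |{a, b}| / |{a, b, c, d}| = 2 / 4 = 0.5.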
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its "extremes": every element is similar to at least one kept element."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call _find_cluster_extremes_shared on every cluster, sharing the dataset through a global variable."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
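# Minimal usage sketch (added; assumes a `datasets.Dataset` with "content",
# "repo_name" and "path" columns - the data file name below is hypothetical):
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="code.jsonl", split="train")
#   ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)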
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 701
|
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager(monkeypatch, tmp_path, urls_type):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number to contain ``n`` digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        # the digit count grows by at most one per step, so equality is always reached
        if len(str(f)) == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
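# Reference note (added): Fibonacci digit counts grow by about
# log10((1 + 5**0.5) / 2) ~= 0.20898 per step, so the returned index is roughly
# n / 0.20898; for n = 1000 the answer is 4782 (Project Euler 25).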
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
def __init__( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Dict=13 , __UpperCAmelCase : List[Any]=7 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : int=99 , __UpperCAmelCase : Tuple=16 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : Any=4 , __UpperCAmelCase : Optional[int]=4 , __UpperCAmelCase : Tuple="gelu" , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : List[Any]=32 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : Union[str, Any]=1 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : int=0.02 , ):
a : str = parent
a : Optional[Any] = batch_size
a : Union[str, Any] = seq_length
a : int = is_training
a : str = use_labels
a : Optional[Any] = vocab_size
a : Tuple = hidden_size
a : Any = num_hidden_layers
a : Optional[Any] = num_attention_heads
a : Optional[Any] = intermediate_size
a : Optional[Any] = hidden_act
a : int = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Tuple = max_position_embeddings
a : Optional[int] = eos_token_id
a : Optional[int] = pad_token_id
a : str = bos_token_id
a : Tuple = initializer_range
def __snake_case ( self : List[Any]):
a : List[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
a : Dict = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
a : List[Any] = shift_tokens_right(__UpperCamelCase , 1 , 2)
a : str = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__UpperCamelCase , )
a : List[str] = prepare_blenderbot_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
return config, inputs_dict
def __snake_case ( self : str):
a , a : str = self.prepare_config_and_inputs()
return config, inputs_dict
def __snake_case ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple):
a : int = 20
a : List[str] = model_class_name(__UpperCamelCase)
a : int = model.encode(inputs_dict["input_ids"])
a , a : List[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
a : int = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase)
a : Dict = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4")
a : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
a : Any = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
a : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
a : Any = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
a : List[str] = model.decode(__UpperCamelCase , __UpperCamelCase)
a : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''')
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any):
a : Any = 20
a : Any = model_class_name(__UpperCamelCase)
a : Optional[int] = model.encode(inputs_dict["input_ids"])
a , a : str = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
a : Optional[int] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
a : List[Any] = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase)
a : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
a : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
a : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
a : str = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
a : Union[str, Any] = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase)
a : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''')
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
def __snake_case ( self : Any):
a : str = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
a : int = input_ids.shape[0]
a : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __snake_case ( self : Tuple):
a , a , a : int = self._get_config_and_data()
a : Tuple = FlaxBlenderbotForConditionalGeneration(__UpperCamelCase)
a : str = lm_model(input_ids=__UpperCamelCase)
a : str = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , __UpperCamelCase)
def __snake_case ( self : List[str]):
a : int = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
a : List[Any] = FlaxBlenderbotForConditionalGeneration(__UpperCamelCase)
a : Optional[int] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa)
a : Optional[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa)
a : int = lm_model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase)
a : List[str] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , __UpperCamelCase)
def __snake_case ( self : Any):
a : Dict = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa)
a : Optional[int] = shift_tokens_right(__UpperCamelCase , 1 , 2)
a : Optional[int] = np.equal(__UpperCamelCase , 1).astype(np.floataa).sum()
a : str = np.equal(__UpperCamelCase , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(__UpperCamelCase , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
def __snake_case ( self : int):
a , a : Any = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
def __snake_case ( self : List[str]):
a , a : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
def __snake_case ( self : Optional[int]):
a , a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
a : List[Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase)
a : str = model_class(__UpperCamelCase)
@jax.jit
def encode_jitted(__UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : Tuple):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase)
with self.subTest("JIT Enabled"):
a : Optional[int] = encode_jitted(**__UpperCamelCase).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
a : str = encode_jitted(**__UpperCamelCase).to_tuple()
self.assertEqual(len(__UpperCamelCase) , len(__UpperCamelCase))
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase):
self.assertEqual(jitted_output.shape , output.shape)
def __snake_case ( self : int):
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
a : Optional[int] = model_class(__UpperCamelCase)
a : Union[str, Any] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
a : Union[str, Any] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : int):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest("JIT Enabled"):
a : List[Any] = decode_jitted(**__UpperCamelCase).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
a : Optional[Any] = decode_jitted(**__UpperCamelCase).to_tuple()
self.assertEqual(len(__UpperCamelCase) , len(__UpperCamelCase))
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def __snake_case ( self : Optional[int]):
for model_class_name in self.all_model_classes:
a : Tuple = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
a : Dict = np.ones((1, 1)) * model.config.eos_token_id
a : Tuple = model(__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU.")
@slow
def __snake_case ( self : Optional[Any]):
a : str = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
a : Dict = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
a : int = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=__UpperCamelCase)
a : List[Any] = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
a : Optional[Any] = ["Sam"]
a : Tuple = tokenizer(__UpperCamelCase , return_tensors="jax")
a : List[Any] = model.generate(**__UpperCamelCase , **__UpperCamelCase)
a : Tuple = "Sam is a great name. It means \"sun\" in Gaelic."
a : Optional[int] = tokenizer.batch_decode(__UpperCamelCase , **__UpperCamelCase)
assert generated_txt[0].strip() == tgt_text
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
_DESCRIPTION = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
_WARNING = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
_LICENSE = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string")),
"references": datasets.Value("string"),
}) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
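# Worked example (added), matching the docstring example above: with n=2 samples and
# c=1 correct for a single task, pass@1 = 1 - comb(1, 1) / comb(2, 1) = 0.5, while
# pass@2 = 1.0 (since n - c = 1 < k = 2 the estimator short-circuits to 1.0):
#
#   estimate_pass_at_k(np.array([2]), np.array([1]), 1)  # -> array([0.5])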
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """
    Convert an Excel-style column title (e.g. "A", "AB") to its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("Z")
    26
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
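# Note (added): with this lazy-module pattern, importing the package stays cheap -
# `MMBTModel` and friends are only materialized by `_LazyModule` the first time the
# corresponding attribute is accessed.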
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
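# Worked example (added): rec_insertion_sort([3, 1, 2], 3) sorts the list in place to
# [1, 2, 3]; each insert_next call swaps adjacent out-of-order elements, carrying the
# larger one rightward until the pair is in order.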
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely, wrapped for `max_width` chars.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub("--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub("--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
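# Illustrative example (added): with args.base_cmd == "run.py --output_dir old_dir",
# get_base_command strips the stale --output_dir, appends the fresh one plus
# --overwrite_output_dir, and returns an argv list ready for subprocess.run.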
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run( id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    '''simple docstring'''
    results = []
    metrics = []
    preamble = F'''{id}: {variation:<{longest_variation_len}}'''
    outcome = F'''{preamble}: '''
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = F'''\33[2K\r{outcome}'''
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = F'''{outcome} {mean_target}'''
        if len(results ) > 1:
            results_str += F''' {tuple(round(x , 2 ) for x in results )}'''
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions( ):
    '''simple docstring'''
    properties = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
    return F'''
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results( results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    '''simple docstring'''
    df = pd.DataFrame(results )
    variation_key = '''variation'''
    diff_key = '''diff_%'''

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='''columns''' , )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis='''columns''' )  # reorder cols

    # capitalize
    df = df.rename(str.capitalize , axis='''columns''' )

    # make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
    df_console = df.rename(lambda c : c.replace('''_''' , '''\n''' ) , axis='''columns''' )

    report = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt='''.2f''' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt='''.2f''' )]

    print('''\n\n'''.join(report ) )
def main( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base-cmd''' , default=None , type=str , required=True , help='''Base cmd''' , )
    parser.add_argument(
        '''--variations''' , default=None , type=str , nargs='''+''' , required=True , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
    parser.add_argument(
        '''--base-variation''' , default=None , type=str , help='''Baseline variation to compare to. If None the minimal target value will be used to compare against''' , )
    parser.add_argument(
        '''--target-metric-key''' , default=None , type=str , required=True , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
    parser.add_argument(
        '''--report-metric-keys''' , default='''''' , type=str , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument, e.g., \'train_loss train_samples\'''' , )
    parser.add_argument(
        '''--repeat-times''' , default=1 , type=int , help='''How many times to re-run each variation - an average will be reported''' , )
    parser.add_argument(
        '''--output_dir''' , default='''output_benchmark''' , type=str , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
    parser.add_argument(
        '''--verbose''' , default=False , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )

    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(R'''\|''' , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(''' '''.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
    print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
    print(F'''and this script\'s output is also piped into {report_fn}''' )

    sys.stdout = Tee(report_fn )

    print(F'''\n*** Running {len(variations )} benchmarks:''' )
    print(F'''Base command: {" ".join(base_cmd )}''' )

    variation_key = '''variation'''
    results = []
    for id, variation in enumerate(tqdm(variations , desc='''Total completion: ''' , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )

    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
    main()
| 131
| 0
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name ):
    config = VideoMAEConfig()
    set_architecture_configs(model_name , config )
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = """huggingface/label-files"""
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = """kinetics400-id2label.json"""
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = """something-something-v2-id2label.json"""
        else:
            raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs(model_name , config ):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2_048
    elif "huge" in model_name:
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2_560
    elif "base" not in model_name:
        raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def rename_key(name ):
    if "encoder." in name:
        name = name.replace("""encoder.""" , """""" )
    if "cls_token" in name:
        name = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
    if "decoder_pos_embed" in name:
        name = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
    if "decoder.blocks" in name:
        name = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """videomae.encoder.layer""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name and "bias" not in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.attention""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "decoder_embed" in name:
        name = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
    if "decoder_norm" in name:
        name = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
    if "decoder_pred" in name:
        name = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
    if "head" in name and "decoder" not in name:
        name = name.replace("""head""" , """classifier""" )
    return name
def convert_state_dict(orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key.startswith("""encoder.""" ):
            key = key.replace("""encoder.""" , """""" )
        if "qkv" in key:
            key_split = key.split(""".""" )
            if key.startswith("""decoder.blocks""" ):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2] )
                prefix = """decoder.decoder_layers."""
                if "weight" in key:
                    orig_state_dict[F"""{prefix}{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                    orig_state_dict[F"""{prefix}{layer_num}.attention.attention.key.weight"""] = val[dim : dim * 2, :]
                    orig_state_dict[F"""{prefix}{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1] )
                prefix = """videomae.encoder.layer."""
                if "weight" in key:
                    orig_state_dict[F"""{prefix}{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                    orig_state_dict[F"""{prefix}{layer_num}.attention.attention.key.weight"""] = val[dim : dim * 2, :]
                    orig_state_dict[F"""{prefix}{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
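# Illustrative sketch (not part of the original conversion script): the qkv handling above
# assumes the fused projection stacks query, key and value row-wise, so a weight of shape
# (3 * dim, dim) decomposes into three (dim, dim) blocks. A hypothetical helper making the
# slicing explicit:
def _split_qkv_sketch(qkv_weight , dim ):
    query = qkv_weight[:dim, :]  # rows [0, dim)
    key = qkv_weight[dim : dim * 2, :]  # rows [dim, 2*dim)
    value = qkv_weight[-dim:, :]  # rows [2*dim, 3*dim)
    return query, key, value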
def prepare_video():
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file )
    return list(video )
def convert_videomae_checkpoint(checkpoint_url , pytorch_dump_folder_path , model_name , push_to_hub ):
    config = get_videomae_config(model_name )

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config )
    else:
        model = VideoMAEForPreTraining(config )

    # download original checkpoint, hosted on Google Drive
    output = """pytorch_model.bin"""
    gdown.cached_download(checkpoint_url , output , quiet=False )
    files = torch.load(output , map_location="""cpu""" )
    if "model" in files:
        state_dict = files["""model"""]
    else:
        state_dict = files["""module"""]
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    video = prepare_video()
    inputs = image_processor(video , return_tensors="""pt""" )

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
        inputs["""bool_masked_pos"""] = torch.load(local_path )

    outputs = model(**inputs )
    logits = outputs.logits

    model_names = [
        """videomae-small-finetuned-kinetics""",
        """videomae-small-finetuned-ssv2""",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        """videomae-base-short""",
        """videomae-base-short-finetuned-kinetics""",
        """videomae-base""",
        """videomae-base-finetuned-kinetics""",
        """videomae-large""",
        """videomae-large-finetuned-kinetics""",
        """videomae-huge-finetuned-kinetics""",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        """videomae-base-short-ssv2""",
        """videomae-base-short-finetuned-ssv2""",
        """videomae-base-ssv2""",
        """videomae-base-finetuned-ssv2""",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307] )
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174] )
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235] )
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1_408, 1_536] )
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1_408, 1_536] )
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1_408, 1_536] )
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493] )
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400] )
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421] )
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1_408, 1_536] )
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174] )
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266] )
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1_408, 1_536] )
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174] )
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389] )
    else:
        raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 )
    else:
        print("""Logits:""" , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1E-4 )
    print("""Logits ok!""" )

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss , expected_loss , atol=1E-4 )
        print("""Loss ok!""" )

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
        model.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print("""Pushing to the hub...""" )
        model.push_to_hub(model_name , organization="""nielsr""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
        type=str,
        help=(
            'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
            ' download link.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/Users/nielsrogge/Documents/VideoMAE/Test',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 99
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""CLIPFeatureExtractor"""]
lowerCAmelCase_ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_clip"""] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_clip"""] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_flax_clip"""] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
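# A minimal sketch of the lazy-import behaviour (illustrative): importing the package itself is
# cheap, and the framework-specific submodules are only imported on first attribute access, e.g.
#
#   from transformers.models.clip import CLIPTextConfig  # no torch/tf/flax import triggered
#   from transformers.models.clip import CLIPModel       # resolved via _LazyModule, importing
#                                                        # modeling_clip (and torch) on demand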
| 411
| 0
|
from PIL import Image
def mean_threshold(image: Image ) -> Image:
    width , height = image.size
    mean = 0
    pixels = image.load()

    for x in range(width ):
        for y in range(height ):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width ):
        for y in range(height ):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
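# Worked example (illustrative numbers): for the grayscale pixels [10, 200, 30, 220]
# the mean is (10 + 200 + 30 + 220) // 4 = 115, so thresholding maps them to [0, 255, 0, 255].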
if __name__ == "__main__":
    image = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
    image.save("""output_image_path""")
| 704
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_longformer"""] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_longformer"""] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 470
| 0
|
def solution(max_base: int = 1_0 , max_power: int = 2_2 ) -> int:
    '''simple docstring'''
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
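# For example, 16807 = 7**5 is a 5-digit fifth power and 134217728 = 8**9 is a
# 9-digit ninth power, so both are counted by solution().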
if __name__ == "__main__":
    print(f"""{solution(10, 22) = }""")
| 54
|
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
    )
    parser.add_argument(
        """--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
    )
    parser.add_argument(
        """--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
    )
    parser.add_argument("""--vocab_size""", default=30_522, type=int)
    args = parser.parse_args()

    logger.info(F"""Loading data from {args.data_file}""")
    with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)

    logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(F"""Dump to {args.token_counts_dump}""")
    with open(args.token_counts_dump, """wb""") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 116
| 0
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset ):
    def __init__( self , length = 101 ):
        self.length = length

    def __len__( self ):
        return self.length

    def __getitem__( self , i ):
        return i


class DummyDataCollator:
    def __call__( self , features ):
        return {"input_ids": torch.tensor(features ), "labels": torch.tensor(features )}


class DummyModel(nn.Module ):
    def __init__( self ):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120 , 80 )

    def forward( self , input_ids , labels=None ):
        if labels is not None:
            return torch.tensor(0.0 , device=input_ids.device ), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus ):
    @require_torch_neuroncore
    def test_trainer( self ):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ["""torchrun"""] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus ):
    @require_torch_multi_gpu
    def test_trainer( self ):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ["""torchrun"""] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
    )
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [1_01, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction ) -> Dict:
            '''simple docstring'''
            sequential = list(range(len(dataset ) ) )
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    """Predictions and/or labels do not match expected results:\n  - predictions: """
                    F"""{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}""" )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None
| 712
|
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    '''simple docstring'''
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        begin_error_msg = """ """.join(str(e ).split(""" """ )[:-1] )
        full_error_msg = """"""
        depreciated_args = eval(str(e ).split(""" """ )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    benchmark.run()
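# For example (illustrative): if a retired flag such as --no_multi_process is passed, the
# resulting parse error is translated via the arg_error_msg template above into the hint
# "Arg --no_multi_process is no longer used, please use --no-multi_process instead."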
if __name__ == "__main__":
    main()
| 256
| 0
|
from __future__ import annotations
__UpperCamelCase : Optional[Any] = list[list[int]]
# assigning initial values to the grid
__UpperCamelCase : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCamelCase : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix , row: int , column: int , n: int ) -> bool:
    '''simple docstring'''
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix ) -> tuple[int, int] | None:
    '''simple docstring'''
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix ) -> Matrix | None:
    '''simple docstring'''
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix ) -> None:
    '''simple docstring'''
    for row in grid:
        for cell in row:
            print(cell , end=""" """ )
        print()
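# Worked sketch of the backtracking step (illustrative): at the first empty cell the solver
# tries digits 1..9; each digit that passes is_safe() is written into the grid and the solver
# recurses. If the recursion dead-ends, the cell is reset to 0 and the next digit is tried.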
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("""\nExample grid:\n""" + """=""" * 20)
        print_solution(example_grid)
        print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("""Cannot find a solution.""")
| 80
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]] ) -> list[list[float]]:
    '''simple docstring'''
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0] , swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0] , swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule:
        # det = aei + bfg + cdh - ceg - bdi - afh for [[a, b, c], [d, e, f], [g, h, i]]
        determinant = float(
            (
                (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
                + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
                + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
            )
            - (
                (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
                + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
                + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
            ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )

        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
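# A minimal usage sketch (the sample matrix is illustrative, not from the source):
#
#   inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]])  # -> [[3.0, -5.0], [-1.0, 2.0]]
#
# since the determinant is 2 * 3 - 1 * 5 = 1 and the adjugate of [[a, b], [c, d]] is
# [[d, -b], [-c, a]].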
| 205
| 0
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem ):
    root_marker = ''
    protocol = 'hf-legacy'  # "hf://"" is reserved for hffs

    def __init__( self , repo_info: Optional[DatasetInfo] = None , token: Optional[str] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs( self ):
        '''simple docstring'''
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    """name""": hf_file.rfilename,
                    """size""": None,
                    """type""": """file""",
                }
                self.dir_cache.update(
                    {
                        str(d ): {"""name""": str(d ), """size""": None, """type""": """directory"""}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )

    def _open( self , path: str , mode: str = "rb" , **kwargs , ):
        '''simple docstring'''
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()

    def info( self , path: str , **kwargs ):
        '''simple docstring'''
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )

    def ls( self , path: str , detail: bool = False , **kwargs ):
        '''simple docstring'''
        self._get_dirs()
        path = PurePosixPath(path.strip("""/""" ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("""/""" ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f["""name"""] for f in out )
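# A minimal usage sketch (illustrative): wrap a DatasetInfo and browse the repo like a filesystem.
#
#   fs = HfFileSystem(repo_info=some_dataset_info)
#   fs.ls("")                  # top-level files/directories of the repo
#   fs.info("data/train.csv")  # {'name': 'data/train.csv', 'size': None, 'type': 'file'}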
| 19
|
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
a : str = True
except (ImportError, ModuleNotFoundError):
a : List[str] = False
if NLTK_AVAILABLE:
    with FileLock('''.lock''') as lock:
        nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str ) -> str:
    """simple docstring"""
    x = re.sub("""<n>""" , """""" , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 19
| 1
|
"""simple docstring"""
import requests
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'
def a ( __UpperCAmelCase : str = "Chicago" , __UpperCAmelCase : str = APPID ) -> dict:
return requests.get(URL_BASE + """weather""" , params=locals() ).json()
def a ( __UpperCAmelCase : str = "Kolkata, India" , __UpperCAmelCase : str = APPID ) -> dict:
return requests.get(URL_BASE + """forecast""" , params=locals() ).json()
def a ( __UpperCAmelCase : float = 55.68 , __UpperCAmelCase : float = 12.57 , __UpperCAmelCase : str = APPID ) -> dict:
return requests.get(URL_BASE + """onecall""" , params=locals() ).json()
if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input('Enter a location:').strip()
        if location:
            pprint(current_weather(location))
        else:
            break
| 96
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool ):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode( self , audio ):
        return self.pre_processor(audio , return_tensors="""pt""" ).input_features

    def forward( self , inputs ):
        return self.model.generate(inputs=inputs )

    def decode( self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
| 96
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowercase__ (unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Optional[int] ):
snake_case__ : List[Any] = tempfile.mkdtemp()
snake_case__ : Any = BlipImageProcessor()
snake_case__ : List[str] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
snake_case__ : int = BlipaProcessor(__a , __a )
processor.save_pretrained(self.tmpdirname )
def lowercase ( self : Any , **__a : str ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).tokenizer
def lowercase ( self : Any , **__a : Optional[int] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def lowercase ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def lowercase ( self : str ):
snake_case__ : Any = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
snake_case__ : int = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self : Union[str, Any] ):
snake_case__ : Tuple = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case__ : Union[str, Any] = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
snake_case__ : List[Any] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def lowercase ( self : Tuple ):
snake_case__ : Any = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : int = BlipaProcessor(tokenizer=__a , image_processor=__a )
snake_case__ : Optional[int] = self.prepare_image_inputs()
snake_case__ : List[str] = image_processor(__a , return_tensors="""np""" )
snake_case__ : List[Any] = processor(images=__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase ( self : Optional[Any] ):
snake_case__ : str = self.get_image_processor()
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : Any = BlipaProcessor(tokenizer=__a , image_processor=__a )
snake_case__ : Any = """lower newer"""
snake_case__ : Optional[Any] = processor(text=__a )
snake_case__ : Any = tokenizer(__a , return_token_type_ids=__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase ( self : Tuple ):
snake_case__ : List[str] = self.get_image_processor()
snake_case__ : List[str] = self.get_tokenizer()
snake_case__ : int = BlipaProcessor(tokenizer=__a , image_processor=__a )
snake_case__ : Any = """lower newer"""
snake_case__ : List[str] = self.prepare_image_inputs()
snake_case__ : Optional[int] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def lowercase ( self : List[Any] ):
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : int = BlipaProcessor(tokenizer=__a , image_processor=__a )
snake_case__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : str = processor.batch_decode(__a )
snake_case__ : int = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def lowercase ( self : str ):
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : Union[str, Any] = self.get_tokenizer()
snake_case__ : List[Any] = BlipaProcessor(tokenizer=__a , image_processor=__a )
snake_case__ : List[Any] = """lower newer"""
snake_case__ : List[str] = self.prepare_image_inputs()
snake_case__ : Optional[int] = processor(text=__a , images=__a )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 127
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_trajectory_transformer'] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 127
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
UpperCAmelCase__ : Tuple = dict(zip(A ,range(len(A ) ) ) )
UpperCAmelCase__ : Optional[Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
UpperCAmelCase__ : int = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16_000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname ,A )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
with open(self.feature_extraction_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
# load decoder from hub
UpperCAmelCase__ : int = """hf-internal-testing/ngram-beam-search-decoder"""
def __lowercase ( self : str ,**A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**A )
def __lowercase ( self : List[str] ,**A : Dict ):
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**A )
def __lowercase ( self : Any ,**A : List[Any] ):
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**A )
def __lowercase ( self : Any ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : str = self.get_decoder()
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : str = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,A )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(A ,"""include""" ):
WavaVecaProcessorWithLM(
tokenizer=A ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_decoder()
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : str = floats_list((3, 1_000) )
UpperCAmelCase__ : Optional[Any] = feature_extractor(A ,return_tensors="""np""" )
UpperCAmelCase__ : List[Any] = processor(A ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_feature_extractor()
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase__ : Optional[int] = self.get_decoder()
UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : List[Any] = """This is a test string"""
UpperCAmelCase__ : int = processor(text=A )
UpperCAmelCase__ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def __lowercase ( self : Tuple ,A : List[Any]=(2, 10, 16) ,A : Dict=77 ):
'''simple docstring'''
np.random.seed(A )
return np.random.rand(*A )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : Dict = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
UpperCAmelCase__ : Tuple = processor.decode(A )
UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams(A )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def __lowercase ( self : List[str] ,A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_feature_extractor()
UpperCAmelCase__ : int = self.get_tokenizer()
UpperCAmelCase__ : List[Any] = self.get_decoder()
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase__ : List[str] = processor.batch_decode(A )
else:
with get_context(A ).Pool() as pool:
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(A ,A )
UpperCAmelCase__ : Optional[Any] = list(A )
with get_context("""fork""" ).Pool() as p:
UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams_batch(A ,A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A ,decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] ,decoded_processor.text )
self.assertListEqual(A ,decoded_processor.logit_score )
self.assertListEqual(A ,decoded_processor.lm_score )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : List[Any] = self.get_decoder()
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : Dict = self._get_dummy_logits()
UpperCAmelCase__ : Any = 15
UpperCAmelCase__ : Dict = -2_0.0
UpperCAmelCase__ : List[Any] = -4.0
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,)
UpperCAmelCase__ : List[str] = decoded_processor_out.text
UpperCAmelCase__ : List[str] = list(A )
with get_context("""fork""" ).Pool() as pool:
UpperCAmelCase__ : Tuple = decoder.decode_beams_batch(
A ,A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,)
UpperCAmelCase__ : List[Any] = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase__ : Any = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase__ : List[str] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A ,A )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] ,A )
self.assertTrue(np.array_equal(A ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] ,A ,atol=1e-3 ) )
self.assertTrue(np.array_equal(A ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] ,A ,atol=1e-3 ) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : str = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
UpperCAmelCase__ : Tuple = self._get_dummy_logits()
UpperCAmelCase__ : Tuple = 2.0
UpperCAmelCase__ : str = 5.0
UpperCAmelCase__ : Union[str, Any] = -2_0.0
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : str = processor.batch_decode(
A ,alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,)
UpperCAmelCase__ : Any = decoded_processor_out.text
UpperCAmelCase__ : Union[str, Any] = list(A )
decoder.reset_params(
alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,)
with get_context("""fork""" ).Pool() as pool:
UpperCAmelCase__ : List[Any] = decoder.decode_beams_batch(
A ,A ,)
UpperCAmelCase__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A ,A )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] ,A )
UpperCAmelCase__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-2_0.0 )
self.assertEqual(lm_model.score_boundary ,A )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : str = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Any = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
UpperCAmelCase__ : Optional[int] = os.listdir(A )
UpperCAmelCase__ : List[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A ,A )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(A )
UpperCAmelCase__ : Tuple = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
UpperCAmelCase__ : Tuple = os.listdir(A )
UpperCAmelCase__ : Dict = os.listdir(A )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder from the hub and the local files in the cache are the same
self.assertListEqual(A ,A )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : Dict = floats_list((3, 1_000) )
UpperCAmelCase__ : List[str] = processor_wavaveca(A ,return_tensors="""np""" )
UpperCAmelCase__ : Dict = processor_auto(A ,return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 )
UpperCAmelCase__ : List[str] = self._get_dummy_logits()
UpperCAmelCase__ : Tuple = processor_wavaveca.batch_decode(A )
UpperCAmelCase__ : List[str] = processor_auto.batch_decode(A )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : List[Any] = self.get_decoder()
UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,)
@staticmethod
def __lowercase ( A : Optional[Any] ,A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [d[key] for d in offsets]
return retrieved_list
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : Dict = self._get_dummy_logits()[0]
UpperCAmelCase__ : List[str] = processor.decode(A ,output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(A ,A ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""start_offset""" ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""end_offset""" ) ,[1, 3, 5] )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
UpperCAmelCase__ : int = self._get_dummy_logits()
UpperCAmelCase__ : Any = processor.batch_decode(A ,output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(A ,A ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(A ,"""word""" ) ) for o in outputs["""word_offsets"""]] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""start_offset""" ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""end_offset""" ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __lowercase ( self : Tuple ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Any = load_dataset("""common_voice""" ,"""en""" ,split="""train""" ,streaming=A )
UpperCAmelCase__ : Tuple = ds.cast_column("""audio""" ,datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase__ : Tuple = iter(A )
UpperCAmelCase__ : Optional[int] = next(A )
UpperCAmelCase__ : List[Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
UpperCAmelCase__ : Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase__ : Tuple = processor(sample["""audio"""]["""array"""] ,return_tensors="""pt""" ).input_values
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(A ).logits.cpu().numpy()
UpperCAmelCase__ : Any = processor.decode(logits[0] ,output_word_offsets=A )
UpperCAmelCase__ : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase__ : Union[str, Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
UpperCAmelCase__ : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,A )
self.assertEqual(""" """.join(self.get_from_offsets(A ,"""word""" ) ) ,output.text )
# output times
UpperCAmelCase__ : str = torch.tensor(self.get_from_offsets(A ,"""start_time""" ) )
UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(A ,"""end_time""" ) )
# fmt: off
UpperCAmelCase__ : Union[str, Any] = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
UpperCAmelCase__ : List[Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) )
self.assertTrue(torch.allclose(A ,A ,atol=0.0_1 ) )
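# --- Illustrative sketch (added): the pool-after-processor ordering that the tests
# above rely on, in plain user code. `logits` is an assumed (batch, time, vocab)
# numpy array; the helper name is hypothetical and not part of the test suite.
def _example_batch_decode_with_pool(logits):
    from multiprocessing import get_context

    from transformers import Wav2Vec2ProcessorWithLM

    processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    # the pool must be created *after* the processor so the LM is visible to its workers
    with get_context("fork").Pool(2) as pool:
        return processor.batch_decode(logits, pool).text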
| 65
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
__UpperCAmelCase = {'target_lang': 'fi', 'source_lang': 'en'}
__UpperCAmelCase = '>>zh<<'
__UpperCAmelCase = 'Helsinki-NLP/'
if is_torch_available():
__UpperCAmelCase = 'pt'
elif is_tf_available():
__UpperCAmelCase = 'tf'
else:
__UpperCAmelCase = 'jax'
@require_sentencepiece
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
snake_case_ = MarianTokenizer
snake_case_ = False
snake_case_ = True
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
UpperCAmelCase__ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
UpperCAmelCase__ : int = dict(zip(A ,range(len(A ) ) ) )
UpperCAmelCase__ : Optional[int] = Path(self.tmpdirname )
save_json(A ,save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(A ,save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(A ,save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(A ,save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
UpperCAmelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self : List[Any] ,**A : List[Any] ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname ,**A )
def __lowercase ( self : Union[str, Any] ,A : Tuple ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = """</s>"""
UpperCAmelCase__ : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""</s>""" )
self.assertEqual(vocab_keys[1] ,"""<unk>""" )
self.assertEqual(vocab_keys[-1] ,"""<pad>""" )
self.assertEqual(len(A ) ,9 )
def __lowercase ( self : Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,9 )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" )
UpperCAmelCase__ : List[str] = en_de_tokenizer(["""I am a small frog"""] ,return_tensors=A )
self.assertIsInstance(A ,A )
UpperCAmelCase__ : str = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(A ,batch.input_ids[0] )
UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(A )
UpperCAmelCase__ : Tuple = [x.name for x in Path(A ).glob("""*""" )]
self.assertIn("""source.spm""" ,A )
MarianTokenizer.from_pretrained(A )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
UpperCAmelCase__ : Any = tok(
["""I am a small frog""" * 1_000, """I am a small frog"""] ,padding=A ,truncation=A ,return_tensors=A )
self.assertIsInstance(A ,A )
self.assertEqual(batch.input_ids.shape ,(2, 512) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_tokenizer()
UpperCAmelCase__ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] ,padding=A ,return_tensors=A )
self.assertIsInstance(A ,A )
self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) )
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
# fmt: off
UpperCAmelCase__ : Optional[int] = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A ,model_name="""Helsinki-NLP/opus-mt-en-de""" ,revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" ,decode_kwargs={"""use_source_tokenizer""": True} ,)
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
UpperCAmelCase__ : Any = """Tämä on testi"""
UpperCAmelCase__ : int = """This is a test"""
UpperCAmelCase__ : List[str] = [76, 7, 2_047, 2]
UpperCAmelCase__ : Optional[Any] = [69, 12, 11, 940, 2]
UpperCAmelCase__ : List[str] = tokenizer(A ).input_ids
self.assertListEqual(A ,A )
UpperCAmelCase__ : Optional[int] = tokenizer(text_target=A ).input_ids
self.assertListEqual(A ,A )
UpperCAmelCase__ : int = tokenizer.decode(A ,skip_special_tokens=A )
self.assertEqual(A ,A )
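# --- Illustrative sketch (added): how the separate source/target vocabularies
# checked above are selected at encode time. `tokenizer` is an assumed
# MarianTokenizer with two spm models; the helper name is hypothetical.
def _example_two_vocab_encoding(tokenizer):
    src_ids = tokenizer("Tämä on testi").input_ids  # encoded with the source vocabulary
    tgt_ids = tokenizer(text_target="This is a test").input_ids  # encoded with the target vocabulary
    return src_ids, tgt_ids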
| 65
| 1
|
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
lowerCamelCase_ = 4
lowerCamelCase_ = 3
class UpperCamelCase_ (__A ):
pass
def snake_case ( A__ ):
for shard in shards:
for i in range(A__ ):
yield {"i": i, "shard": shard}
def snake_case ( ):
UpperCAmelCase_ : Any = int(os.environ["RANK"] )
UpperCAmelCase_ : Dict = int(os.environ["WORLD_SIZE"] )
UpperCAmelCase_ : str = ArgumentParser()
parser.add_argument("--streaming" ,type=A__ )
parser.add_argument("--local_rank" ,type=A__ )
parser.add_argument("--num_workers" ,type=A__ ,default=0 )
UpperCAmelCase_ : str = parser.parse_args()
UpperCAmelCase_ : Optional[int] = args.streaming
UpperCAmelCase_ : str = args.num_workers
UpperCAmelCase_ : Optional[int] = {"shards": [F"""shard_{shard_idx}""" for shard_idx in range(A__ )]}
UpperCAmelCase_ : int = IterableDataset.from_generator(A__ ,gen_kwargs=A__ )
if not streaming:
UpperCAmelCase_ : Optional[Any] = Dataset.from_list(list(A__ ) )
UpperCAmelCase_ : Tuple = split_dataset_by_node(A__ ,rank=A__ ,world_size=A__ )
UpperCAmelCase_ : int = torch.utils.data.DataLoader(A__ ,num_workers=A__ )
UpperCAmelCase_ : List[str] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
UpperCAmelCase_ : List[str] = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
UpperCAmelCase_ : Any = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
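# --- Illustrative sketch (added): the per-rank sizing rule asserted above. When
# `full_size` examples are split across `world_size` ranks, the first
# `full_size % world_size` ranks receive one extra example. Hypothetical helper.
def _expected_local_size(full_size: int, rank: int, world_size: int) -> int:
    return full_size // world_size + int(rank < (full_size % world_size))
# e.g. full_size=14, world_size=4 -> ranks 0 and 1 get 4 examples, ranks 2 and 3 get 3.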
| 463
|
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def snake_case ( A__ ):
return np.dot(A__ ,A__ )
class UpperCamelCase_ :
def __init__( self : int , *,
lowerCAmelCase_ : float = np.inf , lowerCAmelCase_ : str = "linear" , lowerCAmelCase_ : float = 0.0 , ) -> None:
UpperCAmelCase_ : List[str] = regularization
UpperCAmelCase_ : Tuple = gamma
if kernel == "linear":
UpperCAmelCase_ : List[Any] = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("gamma must be float or int" )
if not self.gamma > 0:
raise ValueError("gamma must be > 0" )
UpperCAmelCase_ : Union[str, Any] = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: default gamma = 1 / (n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCAmelCase_ : Tuple = f"""Unknown kernel: {kernel}"""
raise ValueError(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : ndarray , lowerCAmelCase_ : ndarray ) -> float:
return np.dot(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : ndarray , lowerCAmelCase_ : ndarray ) -> float:
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : list[ndarray] , lowerCAmelCase_ : ndarray ) -> None:
UpperCAmelCase_ : int = observations
UpperCAmelCase_ : Optional[Any] = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.regularization >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCAmelCase_) , ) : Optional[int] = np.shape(lowerCAmelCase_ )
def to_minimize(lowerCAmelCase_ : ndarray ) -> float:
UpperCAmelCase_ : int = 0
((UpperCAmelCase_) , ) : str = np.shape(lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
for j in range(lowerCAmelCase_ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(lowerCAmelCase_ )
UpperCAmelCase_ : int = LinearConstraint(lowerCAmelCase_ , 0 , 0 )
UpperCAmelCase_ : Dict = Bounds(0 , self.regularization )
UpperCAmelCase_ : Optional[Any] = minimize(
lowerCAmelCase_ , np.ones(lowerCAmelCase_ ) , bounds=lowerCAmelCase_ , constraints=[ly_contraint] ).x
UpperCAmelCase_ : Optional[Any] = l_star
        # calculating the mean offset b of the separating hyperplane: b ~= mean(yn - w . xn)
UpperCAmelCase_ : Optional[int] = 0
for i in range(lowerCAmelCase_ ):
for j in range(lowerCAmelCase_ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
UpperCAmelCase_ : str = s / n
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : ndarray ) -> int:
UpperCAmelCase_ : List[Any] = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , lowerCAmelCase_ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
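# --- Illustrative sketch (added): the same Wolfe-dual setup as the class above on a
# tiny 1-D toy set, with readable names. Everything here is hypothetical and mirrors,
# rather than extends, the implementation above (linear kernel, hard margin).
def _example_hard_margin_dual():
    xs = np.array([[-2.0], [-1.0], [1.0], [2.0]])
    ys = np.array([-1.0, -1.0, 1.0, 1.0])
    gram = xs @ xs.T  # linear-kernel matrix K[i, j] = x_i . x_j

    def negative_dual(l):
        # maximize sum_n(ln) - 1/2 sum_nm(ln lm yn ym K[n, m])  ==  minimize its negation
        return 0.5 * l @ ((ys[:, None] * ys[None, :]) * gram) @ l - l.sum()

    constraint = LinearConstraint(ys, 0, 0)  # sum_n(ln * yn) = 0
    bounds = Bounds(0, np.inf)  # hard margin: ln >= 0 with no upper cap
    l_star = minimize(negative_dual, np.ones(len(ys)), bounds=bounds, constraints=[constraint]).x
    w = (l_star * ys) @ xs  # w = sum_n(ln * yn * xn)
    b = float(np.mean(ys - xs @ w))  # b ~= mean(yn - w . xn)
    return w, b  # approximately w = [1.0], b = 0.0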
| 463
| 1
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
a__ : Optional[int] = logging.getLogger(__name__)
class UpperCAmelCase_ :
def __init__( self ):
"""simple docstring"""
A_ = False
def __UpperCAmelCase ( self ,__snake_case ,__snake_case ,__snake_case ,__snake_case ):
"""simple docstring"""
if not self.initialized:
A_ = RagRetriever(
lowerCamelCase_ ,question_encoder_tokenizer=lowerCamelCase_ ,generator_tokenizer=lowerCamelCase_ ,index=lowerCamelCase_ ,init_retrieval=lowerCamelCase_ ,)
A_ = True
def __UpperCAmelCase ( self ):
"""simple docstring"""
self.retriever.index.init_index()
def __UpperCAmelCase ( self ,__snake_case ,__snake_case ):
"""simple docstring"""
A_ = self.retriever._main_retrieve(lowerCamelCase_ ,lowerCamelCase_ )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase_ ( _UpperCamelCase ):
def __init__( self ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case=None ):
"""simple docstring"""
if index is not None and index.is_initialized() and len(lowerCamelCase_ ) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
super().__init__(
lowerCamelCase_ ,question_encoder_tokenizer=lowerCamelCase_ ,generator_tokenizer=lowerCamelCase_ ,index=lowerCamelCase_ ,init_retrieval=lowerCamelCase_ ,)
A_ = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
for worker in self.retrieval_workers
] )
def __UpperCAmelCase ( self ):
"""simple docstring"""
logger.info('''initializing retrieval''' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __UpperCAmelCase ( self ,__snake_case ,__snake_case ):
"""simple docstring"""
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
A_ = self.retrieval_workers[random.randint(0 ,len(self.retrieval_workers ) - 1 )]
A_ = ray.get(random_worker.retrieve.remote(lowerCamelCase_ ,lowerCamelCase_ ) )
else:
A_ = self._main_retrieve(lowerCamelCase_ ,lowerCamelCase_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowerCamelCase_ )
@classmethod
def __UpperCAmelCase ( cls ,__snake_case ,__snake_case=None ,**__snake_case ):
"""simple docstring"""
return super(lowerCamelCase_ ,cls ).get_tokenizers(lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ )
@classmethod
def __UpperCAmelCase ( cls ,__snake_case ,__snake_case ,__snake_case=None ,**__snake_case ):
"""simple docstring"""
A_ = kwargs.pop('''config''' ,lowerCamelCase_ ) or RagConfig.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ )
A_ = RagTokenizer.from_pretrained(lowerCamelCase_ ,config=lowerCamelCase_ )
A_ = rag_tokenizer.question_encoder
A_ = rag_tokenizer.generator
if indexed_dataset is not None:
A_ = 'custom'
A_ = CustomHFIndex(config.retrieval_vector_size ,lowerCamelCase_ )
else:
A_ = cls._build_index(lowerCamelCase_ )
return cls(
lowerCamelCase_ ,question_encoder_tokenizer=lowerCamelCase_ ,generator_tokenizer=lowerCamelCase_ ,retrieval_workers=lowerCamelCase_ ,index=lowerCamelCase_ ,)
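# --- Illustrative sketch (added): how the two classes above (RayRetriever and
# RagRayDistributedRetriever in the original sources) are typically wired together.
# The model name and the exact call shape are assumptions, not a verified recipe.
def _example_build_ray_retriever(num_workers=2):
    ray.init(ignore_reinit_error=True)
    # one lightweight actor per worker; each builds its own RagRetriever lazily
    workers = [ray.remote(RayRetriever).remote() for _ in range(num_workers)]
    return RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)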
| 188
|
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( _lowercase : list[float] , _lowercase : Tuple ) -> int:
'''simple docstring'''
print(f"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(_lowercase ):
print(f"""{i}\t\t{d}""" )
def SCREAMING_SNAKE_CASE__ ( _lowercase : list[dict[str, int]] , _lowercase : list[float] , _lowercase : int ) -> Any:
'''simple docstring'''
for j in range(_lowercase ):
lowercase__ , lowercase__ , lowercase__ : Dict = (graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
return True
return False
def SCREAMING_SNAKE_CASE__ ( _lowercase : list[dict[str, int]] , _lowercase : int , _lowercase : int , _lowercase : int ) -> list[float]:
'''simple docstring'''
lowercase__ : Dict = [float('inf' )] * vertex_count
lowercase__ : Dict = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_lowercase ):
lowercase__ , lowercase__ , lowercase__ : int = (graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
lowercase__ : str = distance[u] + w
lowercase__ : str = check_negative_cycle(_lowercase , _lowercase , _lowercase )
if negative_cycle_exists:
raise Exception('Negative cycle found' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase: Optional[int] = int(input("""Enter number of vertices: """).strip())
__UpperCamelCase: Union[str, Any] = int(input("""Enter number of edges: """).strip())
__UpperCamelCase: list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase: List[str] = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
__UpperCamelCase: List[Any] = {"""src""": src, """dst""": dest, """weight""": weight}
__UpperCamelCase: Optional[int] = int(input("""\nEnter shortest path source:""").strip())
__UpperCamelCase: Dict = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
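# --- Illustrative sketch (added): the relaxation step above on a concrete three-node
# graph, with readable names. Hypothetical helper that mirrors bellman_ford(); the
# expected output is worked out in the comments.
def _example_bellman_ford() -> list[float]:
    edges = [
        {"src": 0, "dst": 1, "weight": 4},
        {"src": 0, "dst": 2, "weight": 1},
        {"src": 2, "dst": 1, "weight": 2},
    ]
    distance = [float("inf")] * 3
    distance[0] = 0.0
    for _ in range(3 - 1):  # at most |V| - 1 rounds of relaxation
        for edge in edges:
            u, v, w = edge["src"], edge["dst"], edge["weight"]
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    return distance  # [0.0, 3.0, 1.0]: the path 0 -> 2 -> 1 beats the direct edge 0 -> 1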
| 266
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__snake_case = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
__snake_case = {
"facebook/mbart-large-en-ro": 1_024,
"facebook/mbart-large-cc25": 1_024,
}
# fmt: off
__snake_case = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCAmelCase ( __snake_case ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = ["""input_ids""", """attention_mask"""]
lowercase = MBartTokenizer
lowercase = []
lowercase = []
def __init__( self : Optional[int] , __magic_name__ : str=None , __magic_name__ : Optional[int]=None , __magic_name__ : Dict="<s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : List[str]="</s>" , __magic_name__ : Optional[int]="<s>" , __magic_name__ : int="<unk>" , __magic_name__ : str="<pad>" , __magic_name__ : List[str]="<mask>" , __magic_name__ : Tuple=None , __magic_name__ : Dict=None , __magic_name__ : Dict=None , **__magic_name__ : Tuple , ):
"""simple docstring"""
UpperCamelCase = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
super().__init__(
vocab_file=__magic_name__ , tokenizer_file=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , src_lang=__magic_name__ , tgt_lang=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
UpperCamelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
UpperCamelCase = {
lang_code: self.convert_tokens_to_ids(__magic_name__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase = src_lang if src_lang is not None else """en_XX"""
UpperCamelCase = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowerCamelCase_ ( self : List[str] , __magic_name__ : str ):
"""simple docstring"""
UpperCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase_ ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase_ ( self : int , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : int , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Tuple ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
UpperCamelCase = src_lang
UpperCamelCase = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
UpperCamelCase = self.convert_tokens_to_ids(__magic_name__ )
UpperCamelCase = tgt_lang_id
return inputs
def lowerCamelCase_ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : int , ):
"""simple docstring"""
UpperCamelCase = src_lang
UpperCamelCase = tgt_lang
return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase_ ( self : int , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.convert_tokens_to_ids(__magic_name__ )
UpperCamelCase = []
UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase_ ( self : Tuple , __magic_name__ : str ):
"""simple docstring"""
UpperCamelCase = self.convert_tokens_to_ids(__magic_name__ )
UpperCamelCase = []
UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase_ ( self : Any , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__magic_name__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
UpperCamelCase = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ):
copyfile(self.vocab_file , __magic_name__ )
return (out_vocab_file,)
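# --- Illustrative sketch (added): the token layout produced by the prefix/suffix
# logic above for the default "en_XX" source language. `tok` is an assumed
# MBartTokenizerFast instance; the two-token tail is the point, not the exact ids.
def _example_mbart_layout(tok):
    enc = tok("UN Chief says there is no military solution in Syria")
    # source (and target) sequences are built as: [tokens ...] </s> lang_code
    return tok.convert_ids_to_tokens(enc.input_ids)[-2:]  # -> ["</s>", "en_XX"]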
| 719
|
import math
import unittest
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
with self.assertRaises(__magic_name__ ):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
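# --- Illustrative sketch (added): why the trial-division loop above may step by 6.
# Every integer is one of 6k, 6k+1, ..., 6k+5; the forms 6k, 6k+2 and 6k+4 are even
# and 6k+3 is divisible by 3, so any prime > 3 must be 6k + 1 or 6k + 5 (= 6k - 1).
def _example_six_k_forms(limit=100):
    return all(p % 6 in (1, 5) for p in range(5, limit) if is_prime(p))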
| 181
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Tuple =logging.get_logger(__name__)
__snake_case :str ={
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class lowerCAmelCase__ ( _lowerCamelCase ):
A_ : List[Any] = 'markuplm'
def __init__( self : Optional[Any] , __UpperCamelCase : List[str]=30_522 , __UpperCamelCase : Tuple=768 , __UpperCamelCase : Union[str, Any]=12 , __UpperCamelCase : Any=12 , __UpperCamelCase : Optional[int]=3_072 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Any=512 , __UpperCamelCase : int=2 , __UpperCamelCase : List[Any]=0.0_2 , __UpperCamelCase : Optional[int]=1e-12 , __UpperCamelCase : Tuple=0 , __UpperCamelCase : List[str]=0 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Optional[Any]=256 , __UpperCamelCase : int=1_024 , __UpperCamelCase : Union[str, Any]=216 , __UpperCamelCase : Optional[Any]=1_001 , __UpperCamelCase : Any=32 , __UpperCamelCase : Union[str, Any]=50 , __UpperCamelCase : Tuple="absolute" , __UpperCamelCase : int=True , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Optional[Any] , ) -> Tuple:
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = position_embedding_type
A = use_cache
A = classifier_dropout
# additional properties
A = max_depth
A = max_xpath_tag_unit_embeddings
A = max_xpath_subs_unit_embeddings
A = tag_pad_id
A = subs_pad_id
A = xpath_unit_hidden_size
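# --- Illustrative note (added): the xpath-specific fields above size two embedding
# tables. MarkupLM splits an xpath such as "/html/body/div[1]" into tag units
# ("html", "body", "div") and subscript units (0, 0, 1), embeds each up to
# `max_depth` levels, and pads shorter paths with `tag_pad_id` / `subs_pad_id`.
# A hypothetical decomposition, for orientation only:
def _example_xpath_units():
    tags = ["html", "body", "div"]  # embedded through max_xpath_tag_unit_embeddings
    subs = [0, 0, 1]  # embedded through max_xpath_subs_unit_embeddings
    return list(zip(tags, subs))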
| 106
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_snake_case = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__magic_name__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__magic_name__ , metadata={"help": "The column name of the images in the files."} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(default=__magic_name__ , metadata={"help": "A folder containing the training data."} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(default=__magic_name__ , metadata={"help": "A folder containing the validation data."} )
SCREAMING_SNAKE_CASE_ : Optional[float] =field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
SCREAMING_SNAKE_CASE_ : Optional[int] =field(
default=__magic_name__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] =field(
default=__magic_name__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = {}
if self.train_dir is not None:
UpperCamelCase = self.train_dir
if self.validation_dir is not None:
UpperCamelCase = self.validation_dir
UpperCamelCase = data_files if data_files else None
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str =field(
default=__magic_name__ , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__magic_name__ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__magic_name__ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__magic_name__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
SCREAMING_SNAKE_CASE_ : str =field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
SCREAMING_SNAKE_CASE_ : str =field(default=__magic_name__ , metadata={"help": "Name or path of preprocessor config."} )
SCREAMING_SNAKE_CASE_ : bool =field(
default=__magic_name__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
SCREAMING_SNAKE_CASE_ : float =field(
default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
SCREAMING_SNAKE_CASE_ : bool =field(
default=__magic_name__ , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : float =field(
default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
UpperCamelCase = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def __lowerCamelCase ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _lowercase ) and data_args.train_val_split > 0.0:
UpperCamelCase = ds['train'].train_test_split(data_args.train_val_split )
UpperCamelCase = split['train']
UpperCamelCase = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **_lowercase )
elif model_args.model_name_or_path:
UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_lowercase )
else:
UpperCamelCase = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_lowercase )
elif model_args.model_name_or_path:
UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_lowercase )
else:
UpperCamelCase = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
UpperCamelCase = ViTMAEForPreTraining(_lowercase )
if training_args.do_train:
UpperCamelCase = ds['train'].column_names
else:
UpperCamelCase = ds['validation'].column_names
if data_args.image_column_name is not None:
UpperCamelCase = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase = 'image'
elif "img" in column_names:
UpperCamelCase = 'img'
else:
UpperCamelCase = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase = image_processor.size['shortest_edge']
else:
UpperCamelCase = (image_processor.size['height'], image_processor.size['width'])
UpperCamelCase = Compose(
[
Lambda(lambda _lowercase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_lowercase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_lowercase ):
UpperCamelCase = [transforms(_lowercase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
UpperCamelCase = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
UpperCamelCase = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_lowercase )
# Compute absolute learning rate
UpperCamelCase = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase = last_checkpoint
UpperCamelCase = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase = trainer.evaluate()
trainer.log_metrics('eval' , _lowercase )
trainer.save_metrics('eval' , _lowercase )
# Write model card and (optionally) push to hub
UpperCamelCase = {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase )
else:
trainer.create_model_card(**_lowercase )
def __lowerCamelCase ( _lowercase ) -> int:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
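# --- Illustrative sketch (added): the absolute learning-rate scaling applied above.
# All numbers are hypothetical: with base_lr=1e-3, a per-device batch of 64, gradient
# accumulation of 2 and world size 2, the total batch is 256 and absolute_lr = 1e-3.
def _example_absolute_lr(base_lr=1e-3, per_device_batch=64, grad_accum=2, world_size=2):
    total_train_batch_size = per_device_batch * grad_accum * world_size
    return base_lr * total_train_batch_size / 256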
| 282
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowerCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : Optional[Any] = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self : Union[str, Any] , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A_ : Optional[Any] = deprecated_arg[3:]
A_ : List[str] = not kwargs.pop(_lowerCamelCase )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
A_ : Dict = kwargs.pop('''tpu_name''' , self.tpu_name )
A_ : Dict = kwargs.pop('''device_idx''' , self.device_idx )
A_ : List[Any] = kwargs.pop('''eager_mode''' , self.eager_mode )
A_ : Tuple = kwargs.pop('''use_xla''' , self.use_xla )
super().__init__(**_lowerCamelCase )
    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
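
# A minimal usage sketch (the constructor arguments below are hypothetical): the
# `strategy` property returns a `tf.distribute.Strategy` whose scope can wrap
# model construction when benchmarking, e.g.
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], eager_mode=False)
#   with args.strategy.scope():
#       pass  # build and run the benchmarked model here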
| 710
|
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
    if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 361
| 0
|
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 474
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """
    Counts the "triangle words" in words.txt: words whose alphabetical value
    (A=1, ..., Z=26, summed over the letters) is a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
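
# Worked example: the word "SKY" has alphabetical value 19 + 11 + 25 = 55, and
# 55 = 0.5 * 10 * 11 is the 10th triangular number, so "SKY" is a triangle word.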
if __name__ == "__main__":
print(solution())
| 474
| 1
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
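# A rough sketch of what `find_executable_batch_size` does (a simplification for
# intuition, not the actual accelerate implementation): it calls the decorated
# function with `starting_batch_size` and, whenever a CUDA out-of-memory error is
# raised, halves the batch size and retries until the function completes:
#
#   def find_executable_batch_size(starting_batch_size):
#       def decorator(fn):
#           def wrapper():
#               batch_size = starting_batch_size
#               while batch_size > 0:
#                   try:
#                       return fn(batch_size)
#                   except RuntimeError:  # CUDA OOM surfaces as a RuntimeError
#                       batch_size //= 2
#               raise RuntimeError("No executable batch size found.")
#           return wrapper
#       return decorator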
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train and eval GLUE MRPC dataloaders, tokenized with "bert-base-cased"."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 664
|
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
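# For the adjacency list above, the articulation points printed are 2, 3 and 5:
# removing any of these vertices disconnects the graph.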
| 664
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A_ : Tuple = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
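        expected_words = A_  # keep a separate reference to the expected OCR words, since the name is reused below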
A_ : str = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], 
[4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
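        expected_boxes = A_  # the bounding boxes corresponding to the expected words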
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 558
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)
    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 61
| 0
|
"""simple docstring"""
def min_path_sum(grid):
    """
    Find the path from the top left to the bottom right of an array of numbers
    (moving only right or down) with the lowest possible sum, and return that sum.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row, row_above):
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
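
# Worked example: min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) returns 7,
# following the right/down path 1 -> 3 -> 1 -> 1 -> 1.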
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the set bits of a non-negative integer by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
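
# Worked example: for number = 25 = 0b11001, `number &= number - 1` clears the
# lowest set bit each iteration (11001 -> 11000 -> 10000 -> 0), so the result is 3.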
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the set bits of a non-negative integer by testing each bit with the modulo operator."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark the two implementations on a few inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 498
| 0
|
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sorts a list of non-negative integers in place using LSD radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
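
# Worked example: radix_sort([170, 45, 75, 90, 2, 24]) distributes the values into
# buckets one decimal digit at a time and returns [2, 24, 45, 75, 90, 170].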
if __name__ == "__main__":
import doctest
doctest.testmod()
| 578
|
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)
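
# Worked example: for positive_integer = 2, sqrt(4 * 2 + 1) / 2 + 1 / 2 = 2 and
# log2(2) = 1 is an integer, so the partition is perfect; for positive_integer = 6
# the expression gives log2(3), which is not an integer.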
def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f"{solution() = }")
| 578
| 1
|
'''simple docstring'''
def kinetic_energy(mass: float, velocity: float) -> float:
    """Calculate the kinetic energy of a body: KE = 0.5 * m * v^2."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
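
# Worked example: kinetic_energy(10, 10) = 0.5 * 10 * 10 * 10 = 500.0 joules
# (for mass in kilograms and velocity in metres per second).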
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 220
|
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
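
# Usage sketch (shapes are illustrative): a float tensor of shape (batch, 3, H, W)
# with values in [-1, 1] goes through pt_to_pil, which rescales it to [0, 1],
# reorders it to (batch, H, W, 3) and hands it to numpy_to_pil for conversion.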
| 220
| 1
|
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!"""
    return sum(int(x) for x in str(factorial(num)))
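
# Worked example: solution(10) computes 10! = 3628800, whose digits sum to
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.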
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 2
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 409
| 0
|
'''simple docstring'''
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return how many documents in `corpus` contain `term`, and the total document count."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Return round(log10(n / df), 3), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Combine a term frequency and an inverse document frequency into a tf-idf score."""
    return round(tf * idf, 3)
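
# Worked example: with tf = 3 occurrences of a term in a document and
# idf = round(log10(10 / 2), 3) = 0.699 for a 10-document corpus in which the
# term appears in 2 documents, tf_idf(3, 0.699) returns 2.097.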
| 211
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 211
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 54
|
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 265
| 0
|
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors `alpha` and `sigma` of a noised sample."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
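
# Worked example: a clean sample (alpha = 1, sigma = 0) maps to
# t = atan2(0, 1) / pi * 2 = 0, while pure noise (alpha = 0, sigma = 1) maps to
# t = atan2(1, 0) / pi * 2 = 1.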
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")

    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}
RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
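# Illustrative mappings produced by convert_attn_naming (derived from ATTN_MAP):
#   "norm.weight"     -> "group_norm.weight"
#   "qkv_proj.weight" -> ["query.weight", "key.weight", "value.weight"]
#   "out_proj.weight" -> ["proj_attn.weight"]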
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
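# Shape intuition (a sketch, not from the original script): a 1x1 Conv1d weight for
# fused qkv attention has shape (3*C, C, 1). Slicing rows i*C:(i+1)*C and dropping
# the trailing kernel axis yields the three (C, C) Linear weights that diffusers'
# attention blocks expect; bias vectors of shape (3*C,) are split the same way.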
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
main(args)
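# Example invocation (output path is illustrative); passing an official model name
# from MODELS_MAP instead of a local file triggers the download() helper above:
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers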
| 721
|
"""Least-significant-digit (LSD) radix sort for non-negative integers."""
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
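# Example usage (a quick check, not part of the original module):
# >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
# [2, 24, 45, 66, 75, 90, 170, 802]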
| 145
| 0
|
"""Image processor class for video models: per-frame transforms, batched output."""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ):
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
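# Usage sketch (frame objects are illustrative; any PIL images or numpy arrays work):
# processor = VideoMAEImageProcessor()
# batch = processor(videos=[[frame1, frame2, frame3]], return_tensors="np")
# batch["pixel_values"]  # shape: (num_videos, num_frames, channels, height, width)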
| 672
|
"""Gaussian error linear unit (GELU), approximated via a scaled sigmoid."""
import numpy as np


def sigmoid(vector):
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector):
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
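# Quick check (illustrative): the sigmoid-based GELU approximation behaves like the
# identity for large positive inputs and like zero for large negative inputs, e.g.
# gaussian_error_linear_unit(np.array(3.0)) ~= 2.98 and ~= -0.02 at -3.0.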
| 672
| 1
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self):
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other):
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
a__ : Optional[int] = (0, 0)
a__ : Dict = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a__ : int = time.time()
a__ : Dict = AStar(init, goal)
a__ : Optional[Any] = a_star.search()
a__ : Optional[int] = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
a__ : Union[str, Any] = time.time()
a__ : Optional[Any] = BidirectionalAStar(init, goal)
a__ : Any = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
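    # To visualize the forward-search result (illustrative, not in the original):
    # mark the found path cells and print the grid again.
    # for py, px in path:
    #     grid[py][px] = 2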
| 235
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
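# Usage sketch (model id and input image are illustrative; assumes an unconditional
# image-space UNet checkpoint such as a DDPM model):
# from diffusers import UNet2DModel
# unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")
# pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=DDIMScheduler())
# image, timestep = pipe(image=pil_image, strength=0.6, num_inference_steps=50, return_dict=False)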
| 235
| 1
|
"""Project Euler 9: product of the unique Pythagorean triplet with a + b + c = 1000."""


def solution():
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
    print(f"{solution() = }")
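# Well-known result: the only Pythagorean triplet with a + b + c = 1000 is
# (200, 375, 425), so solution() == 200 * 375 * 425 == 31875000.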
| 107
|
def longest_distance(graph):
    """Print the number of vertices on the longest path of a DAG given as an adjacency list."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
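# For the adjacency list above, the longest path is 0 -> 2 -> 5 -> 6 -> 7,
# so this call prints 5 (path lengths are counted in vertices, starting at 1).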
| 235
| 0
|
"""Image processor applying resize, center-crop, rescale and normalize steps."""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class CustomImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 719
|
"""Min-max normalization and z-score standardization of numeric data."""
from statistics import mean, stdev


def normalization(data, ndigits=3):
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data, ndigits=3):
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
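# Worked example (plain arithmetic): min=2, max=50, so each value is offset by 2
# and divided by 48.
# >>> normalization([2, 7, 10, 20, 30, 50])
# [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]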
| 163
| 0
|
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively check both ends of the list, narrowing inward one step per call."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
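# Example (illustrative): checking both ends halves the recursion depth compared
# with a one-ended linear scan.
# >>> search([1, 5, 9, 12], 9)
# 2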
| 136
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 136
| 1
|
"""Compare x1**y1 and x2**y2 via logarithms, avoiding huge exponentiations."""
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowerCAmelCase_ : Optional[Any] = "Enter the base and the power separated by a comma: "
lowerCAmelCase_ , lowerCAmelCase_ : Any = map(int, input(prompt).split(","))
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowerCAmelCase_ : str = res(xa, ya)
lowerCAmelCase_ : List[Any] = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
| 461
|
"""Project Euler 36: sum the double-base (base 10 and base 2) palindromes below a limit."""
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(input().strip())))
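# Known result for Project Euler problem 36: solution(1_000_000) == 872187, the sum
# of all numbers below one million that are palindromic in both base 10 and base 2.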
| 461
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
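# Usage sketch: instantiate with the checkpoint defaults and override as needed,
# e.g. a longer maximum token length (the value below is illustrative):
# config = MgpstrConfig(max_token_length=32)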
| 410
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True
CHAINS[57] = False


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10_000_000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
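# Known result for Project Euler problem 92: solution() == 8581146, the count of
# starting numbers below ten million whose digit-square chain arrives at 89.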
| 471
| 0
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[str] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(a_ ),
*get_values(a_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
SCREAMING_SNAKE_CASE__ : int = problem_type['title']
SCREAMING_SNAKE_CASE__ : Tuple = problem_type['num_labels']
SCREAMING_SNAKE_CASE__ : str = model_class(a_ )
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
SCREAMING_SNAKE_CASE__ : Any = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=a_ ) as warning_list:
SCREAMING_SNAKE_CASE__ : str = model(**a_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = DeiTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def __lowercase( self : int )-> Dict:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ )
# verify the logits
SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowercase( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = inputs.pixel_values.to(a_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
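# The problem_type loop above reshapes labels before computing the loss. A minimal
# sketch of that shaping with descriptive names -- `make_labels` is a hypothetical
# helper for illustration, not part of the test suite:
import torch

def make_labels(batch_size, num_labels, dtype, multi_label=False):
    # Single-column labels, as _prepare_for_class would return them.
    labels = torch.zeros(batch_size, dtype=torch.long)
    if multi_label and num_labels > 1:
        # multi_label_classification expects (batch, num_labels) float targets.
        labels = labels.unsqueeze(1).repeat(1, num_labels)
    return labels.to(dtype)

print(make_labels(1, 1, torch.long).shape)         # torch.Size([1])
print(make_labels(1, 2, torch.float, True).shape)  # torch.Size([1, 2])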
| 636
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _a ( lowercase__ : int ):
'''simple docstring'''
if is_torch_version('<' , '2.0.0' ) or not hasattr(lowercase__ , '_dynamo' ):
return False
return isinstance(lowercase__ , torch._dynamo.eval_frame.OptimizedModule )
def _a ( lowercase__ : Optional[Any] , lowercase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE__ : Dict = is_compiled_module(lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : Tuple = model
SCREAMING_SNAKE_CASE__ : int = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : Any = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(lowercase__ , 'forward' )
SCREAMING_SNAKE_CASE__ : str = model.__dict__.pop('_original_forward' , lowercase__ )
if original_forward is not None:
while hasattr(lowercase__ , '__wrapped__' ):
SCREAMING_SNAKE_CASE__ : Dict = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE__ : Dict = forward
if getattr(lowercase__ , '_converted_to_transformer_engine' , lowercase__ ):
convert_model(lowercase__ , to_transformer_engine=lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : List[Any] = model
SCREAMING_SNAKE_CASE__ : Optional[Any] = compiled_model
return model
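# A de-obfuscated sketch of the unwrapping loop above: peel DistributedDataParallel /
# DataParallel (and, when available, DeepSpeedEngine) containers off a model. The
# name `unwrap_model` is an assumption, not the library's public API:
import torch

def unwrap_model(model):
    # Containers that hide the real module behind a `.module` attribute.
    wrappers = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    while isinstance(model, wrappers):
        model = model.module
    return model

net = torch.nn.Linear(2, 2)
assert unwrap_model(torch.nn.DataParallel(net)) is net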
def _a ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def _a ( lowercase__ : str , lowercase__ : Optional[Any] ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowercase__ , lowercase__ )
elif PartialState().local_process_index == 0:
torch.save(lowercase__ , lowercase__ )
@contextmanager
def _a ( **lowercase__ : str ):
'''simple docstring'''
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE__ : int = str(lowercase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if not hasattr(lowercase__ , '__qualname__' ) and not hasattr(lowercase__ , '__name__' ):
SCREAMING_SNAKE_CASE__ : Any = getattr(lowercase__ , '__class__' , lowercase__ )
if hasattr(lowercase__ , '__qualname__' ):
return obj.__qualname__
if hasattr(lowercase__ , '__name__' ):
return obj.__name__
return str(lowercase__ )
def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ):
'''simple docstring'''
for key, value in source.items():
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = destination.setdefault(lowercase__ , {} )
merge_dicts(lowercase__ , lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = value
return destination
def _a ( lowercase__ : int = None ):
'''simple docstring'''
if port is None:
SCREAMING_SNAKE_CASE__ : int = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
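# De-obfuscated sketches of the last two helpers -- a recursive dict merge and an
# "is this port already in use?" probe. `merge_dicts` / `is_port_in_use` are assumed
# names reconstructed from the bodies:
import socket

def merge_dicts(source, destination):
    # Recursively fold `source` into `destination`, merging nested dicts in place.
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination

def is_port_in_use(port=29500):
    # connect_ex returns 0 only when something is already listening on the port.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0

print(merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}))  # {'a': {'c': 2, 'b': 1}}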
| 636
| 1
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , ) -> List[str]:
'''simple docstring'''
lowercase , lowercase = grid.shape
lowercase = [-1, 1, 0, 0]
lowercase = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase , lowercase = [(0, source)], set()
lowercase = np.full((rows, cols) , np.inf )
lowercase = 0
lowercase = np.empty((rows, cols) , dtype=lowerCAmelCase__ )
lowercase = None
while queue:
((lowercase) , (lowercase)) = heappop(lowerCAmelCase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase = []
while (x, y) != source:
path.append((x, y) )
lowercase , lowercase = predecessors[x, y]
path.append(lowerCAmelCase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(dx ) ):
lowercase , lowercase = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowerCAmelCase__ , (dist + 1, (nx, ny)) )
lowercase = dist + 1
lowercase = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
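# Because the placeholder assignments above hide which arrays get updated, here is
# a de-obfuscated sketch of the same grid Dijkstra (unit edge weights, a cell is
# walkable iff it equals 1, 4-neighbourhood). All names are assumptions
# reconstructed from the reads in the function above:
from heapq import heappop, heappush
import numpy as np

def grid_dijkstra(grid, source, destination):
    rows, cols = grid.shape
    dx, dy = [-1, 1, 0, 0], [0, 0, -1, 1]
    queue, visited = [(0, source)], set()
    dist_matrix = np.full((rows, cols), np.inf)
    dist_matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    while queue:
        dist, (x, y) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            # Walk the predecessor chain back to the source to recover the path.
            path = [(x, y)]
            while (x, y) != source:
                x, y = predecessors[x, y]
                path.append((x, y))
            path.reverse()
            return dist_matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 1:
                if dist_matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    dist_matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []

# Shortest path across a fully walkable 3x3 grid: distance 4.0.
print(grid_dijkstra(np.ones((3, 3), dtype=int), (0, 0), (2, 2)))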
| 359
|
def __lowerCAmelCase ( A , A ):
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def __lowerCAmelCase ( A , A , A ):
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
UpperCAmelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
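# A readable sketch of the two routines above -- Russian-peasant (binary)
# multiplication and its modular variant. The names are assumptions:
def binary_multiply(a, b):
    # Add a shifted copy of `a` for every set bit of `b`.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res

def binary_mod_multiply(a, b, modulus):
    # Same doubling scheme, reducing mod `modulus` at each accumulation step.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res

assert binary_multiply(6, 7) == 42
assert binary_mod_multiply(6, 7, 5) == 42 % 5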
| 162
| 0
|
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__lowercase = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_80_00,
"sample_size": 6_55_36,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_80_00,
"sample_size": 6_55_36,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_80_00,
"sample_size": 13_10_72,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_60_00,
"sample_size": 6_55_36,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_60_00,
"sample_size": 6_55_36,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_60_00,
"sample_size": 6_55_36,
},
}
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return torch.atana(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) / math.pi * 2
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ) -> int:
lowerCAmelCase_ : List[Any] =torch.sin(t * math.pi / 2 ) ** 2
lowerCAmelCase_ : List[str] =(1 - sigma**2) ** 0.5
return alpha_sigma_to_t(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
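# These two helpers presumably are alpha_sigma_to_t and get_crash_schedule: for a
# linear t in [0, 1], sigma = sin(t*pi/2)**2, alpha = sqrt(1 - sigma**2), and the
# "crash" timestep is atan2(sigma, alpha) * 2 / pi (`atana` above is the dataset's
# scrambled spelling of `atan2`). A quick numeric check under that reading:
import math

t = 0.5
sigma = math.sin(t * math.pi / 2) ** 2         # 0.5
alpha = (1 - sigma**2) ** 0.5                  # ~0.866
print(math.atan2(sigma, alpha) / math.pi * 2)  # ~0.333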
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
pass
class _snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCamelCase_ : str ):
super().__init__()
lowerCAmelCase_ : int =DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 )
lowerCAmelCase_ : Tuple =deepcopy(self.diffusion )
lowerCAmelCase_ : Optional[Any] =torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ) -> str:
lowerCAmelCase_ : List[Any] =MODELS_MAP[model_name]['''url''']
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
__lowercase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__lowercase = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__lowercase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__lowercase = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__lowercase = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__lowercase = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ) -> int:
for key, value in ATTN_MAP.items():
if name.startswith(key ) and not isinstance(value , list ):
return name.replace(key , value )
elif name.startswith(key ):
return [name.replace(key , v ) for v in value]
raise ValueError(f'Attn error with {name}' )
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] =input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''' , '''time_proj''' )
lowerCAmelCase_ : Tuple =0
if string.startswith('''net.3.''' ):
depth += 1
lowerCAmelCase_ : Optional[int] =string[6:]
elif string.startswith('''net.''' ):
lowerCAmelCase_ : Any =string[4:]
while string.startswith('''main.7.''' ):
depth += 1
lowerCAmelCase_ : Any =string[7:]
if string.startswith('''main.''' ):
lowerCAmelCase_ : List[str] =string[5:]
# mid block
if string[:2].isdigit():
lowerCAmelCase_ : Any =string[:2]
lowerCAmelCase_ : str =string[2:]
else:
lowerCAmelCase_ : Any =string[0]
lowerCAmelCase_ : Union[str, Any] =string[1:]
if depth == max_depth:
lowerCAmelCase_ : str =MID_NUM_TO_LAYER[layer_num]
lowerCAmelCase_ : Union[str, Any] ='''mid_block'''
elif depth > 0 and int(layer_num ) < 7:
lowerCAmelCase_ : Any =DOWN_NUM_TO_LAYER[layer_num]
lowerCAmelCase_ : List[str] =f'down_blocks.{depth}'
elif depth > 0 and int(layer_num ) > 7:
lowerCAmelCase_ : List[str] =UP_NUM_TO_LAYER[layer_num]
lowerCAmelCase_ : str =f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
lowerCAmelCase_ : Optional[int] =DEPTH_0_TO_LAYER[layer_num]
lowerCAmelCase_ : int =f'up_blocks.{max_depth - 1}' if int(layer_num ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
lowerCAmelCase_ : Union[str, Any] =string_left[1:]
if "resnets" in new_layer:
lowerCAmelCase_ : Optional[int] =convert_resconv_naming(string_left )
elif "attentions" in new_layer:
lowerCAmelCase_ : Dict =convert_attn_naming(string_left )
lowerCAmelCase_ : Optional[Any] =new_string_left
if not isinstance(string_left , list ):
lowerCAmelCase_ : Dict =prefix + '''.''' + new_layer + '''.''' + string_left
else:
lowerCAmelCase_ : Optional[int] =[prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowerCAmelCase_ : Union[str, Any] ={}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
# up- and downsample layers don't have trainable weights
continue
lowerCAmelCase_ : int =rename(_SCREAMING_SNAKE_CASE )
# check if we need to transform from Conv => Linear for attention
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ : Any =transform_conv_attns(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase_ : Union[str, Any] =v
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if len(_SCREAMING_SNAKE_CASE ) == 1:
if len(v.shape ) == 3:
# weight
lowerCAmelCase_ : Union[str, Any] =v[:, :, 0]
else:
# bias
lowerCAmelCase_ : List[Any] =v
else:
# qkv matrices
lowerCAmelCase_ : Tuple =v.shape[0]
lowerCAmelCase_ : str =trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
lowerCAmelCase_ : Dict =v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
lowerCAmelCase_ : List[Any] =v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
lowerCAmelCase_ : Optional[int] =torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
lowerCAmelCase_ : List[str] =args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
lowerCAmelCase_ : List[Any] =download(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Optional[int] =MODELS_MAP[model_name]['''sample_rate''']
lowerCAmelCase_ : str =MODELS_MAP[model_name]['''sample_size''']
lowerCAmelCase_ : Tuple =Object()
lowerCAmelCase_ : List[str] =sample_size
lowerCAmelCase_ : Optional[int] =sample_rate
lowerCAmelCase_ : Optional[Any] =0
lowerCAmelCase_ : List[Any] =UNetaDModel(sample_size=_SCREAMING_SNAKE_CASE , sample_rate=_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : str =diffusers_model.state_dict()
lowerCAmelCase_ : int =DiffusionUncond(_SCREAMING_SNAKE_CASE )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_SCREAMING_SNAKE_CASE )['''state_dict'''] )
lowerCAmelCase_ : List[Any] =orig_model.diffusion_ema.eval()
lowerCAmelCase_ : Optional[int] =orig_model.state_dict()
lowerCAmelCase_ : Dict =rename_orig_weights(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Optional[int] =set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
lowerCAmelCase_ : Any =set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_SCREAMING_SNAKE_CASE ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith('''kernel''' ) for k in list(_SCREAMING_SNAKE_CASE ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
lowerCAmelCase_ : List[Any] =value.squeeze()
lowerCAmelCase_ : Union[str, Any] =value
diffusers_model.load_state_dict(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Tuple =100
lowerCAmelCase_ : List[str] =33
lowerCAmelCase_ : Dict =IPNDMScheduler(num_train_timesteps=_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : str =torch.manual_seed(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Optional[int] =torch.randn([1, 2, config.sample_size] , generator=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : List[str] =torch.linspace(1 , 0 , steps + 1 , device=_SCREAMING_SNAKE_CASE )[:-1]
lowerCAmelCase_ : Optional[Any] =get_crash_schedule(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : str =DanceDiffusionPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Dict =torch.manual_seed(33 )
lowerCAmelCase_ : Tuple =pipe(num_inference_steps=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).audios
lowerCAmelCase_ : str =sampling.iplms_sample(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {} )
lowerCAmelCase_ : int =generated.clamp(-1 , 1 )
lowerCAmelCase_ : List[str] =(generated - audio).abs().sum()
lowerCAmelCase_ : Optional[int] =(generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , _SCREAMING_SNAKE_CASE )
print('''Diff max''' , _SCREAMING_SNAKE_CASE )
assert diff_max < 1e-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowercase = parser.parse_args()
main(args)
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowercase = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''MobileNetV2FeatureExtractor''']
__lowercase = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 305
| 0
|
from copy import deepcopy
class __A:
def __init__( self , _snake_case = None , _snake_case = None ) -> None:
'''simple docstring'''
if arr is None and size is not None:
__a = size
__a = [0] * size
elif arr is not None:
self.init(_snake_case )
else:
raise ValueError('''Either arr or size must be specified''' )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None:
'''simple docstring'''
__a = len(_snake_case )
__a = deepcopy(_snake_case )
for i in range(1 , self.size ):
__a = self.next_(_snake_case )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE_ ( self ) -> list[int]:
'''simple docstring'''
__a = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
__a = self.next_(_snake_case )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def SCREAMING_SNAKE_CASE_ ( _snake_case ) -> int:
'''simple docstring'''
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE_ ( _snake_case ) -> int:
'''simple docstring'''
return index - (index & (-index))
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> None:
'''simple docstring'''
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
__a = self.next_(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> None:
'''simple docstring'''
self.add(_snake_case , value - self.get(_snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
if right == 0:
return 0
__a = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
__a = self.prev(_snake_case )
return result
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int:
'''simple docstring'''
return self.prefix(_snake_case ) - self.prefix(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
return self.query(_snake_case , index + 1 )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
value -= self.tree[0]
if value < 0:
return -1
__a = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
__a = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
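# The class above is a Fenwick (binary indexed) tree; the repeated placeholder
# method names hide which operation is which. A usage sketch, assuming the
# conventional names the method bodies correspond to (add / prefix / query /
# rank_query -- these names are reconstructions, not guaranteed):
#   tree = BinaryIndexedTree(arr=[1, 2, 3, 4, 5])
#   tree.prefix(3)      # -> 6, sum of the first three elements (right-exclusive)
#   tree.query(1, 4)    # -> 9, i.e. 2 + 3 + 4
#   tree.rank_query(6)  # -> 2, the last index whose running prefix sum is <= 6
#   tree.add(2, 10)     # point update: the logical array becomes [1, 2, 13, 4, 5]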
| 219
|
def __lowerCAmelCase ( a__ , a__ ) -> None:
__a = len(a__ )
print('''The following activities are selected:''' )
# The first activity is always selected
__a = 0
print(a__ , end=''',''' )
# Consider rest of the activities
for j in range(a__ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(a__ , end=''',''' )
__a = j
if __name__ == "__main__":
import doctest
doctest.testmod()
A : Tuple = [1, 3, 0, 5, 8, 5]
A : Dict = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
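# A compact restatement of the greedy rule above with assumed names (activities
# are taken to be pre-sorted by finish time, as in the inputs):
def select_activities(start, finish):
    # Greedily keep each activity whose start is at or after the finish time
    # of the most recently selected activity.
    selected = [0]
    for j in range(1, len(finish)):
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    return selected

print(select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]))  # [0, 1, 3, 4]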
| 219
| 1
|
'''simple docstring'''
__lowerCamelCase : str = 8.314_4598
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
__lowerCamelCase : Optional[Any] = 300
__lowerCamelCase : Tuple = 0.028  # molar mass of N2 in kg/mol (28 g/mol)
__lowerCamelCase : str = rms_speed_of_molecule(temperature, molar_mass)
print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 708
|
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__lowerCamelCase : Any = open # noqa: we just need to have a builtin inside this module to test it properly
| 459
| 0
|
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = SwinConfig(image_size=1_9_2 )
if "base" in model_name:
_UpperCAmelCase = 6
_UpperCAmelCase = 1_2_8
_UpperCAmelCase = (2, 2, 1_8, 2)
_UpperCAmelCase = (4, 8, 1_6, 3_2)
elif "large" in model_name:
_UpperCAmelCase = 1_2
_UpperCAmelCase = 1_9_2
_UpperCAmelCase = (2, 2, 1_8, 2)
_UpperCAmelCase = (6, 1_2, 2_4, 4_8)
else:
raise ValueError('Model not supported, only supports base and large variants' )
_UpperCAmelCase = window_size
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
return config
def lowerCAmelCase__ ( a__: Any ) -> str:
'''simple docstring'''
if "encoder.mask_token" in name:
_UpperCAmelCase = name.replace('encoder.mask_token' , 'embeddings.mask_token' )
if "encoder.patch_embed.proj" in name:
_UpperCAmelCase = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "encoder.patch_embed.norm" in name:
_UpperCAmelCase = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' )
if "attn.proj" in name:
_UpperCAmelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
_UpperCAmelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
_UpperCAmelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_UpperCAmelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_UpperCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_UpperCAmelCase = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
_UpperCAmelCase = 'layernorm.weight'
if name == "encoder.norm.bias":
_UpperCAmelCase = 'layernorm.bias'
if "decoder" in name:
pass
else:
_UpperCAmelCase = 'swin.' + name
return name
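# Two example traces through the renaming above, assuming the replacement table
# given in this function (outputs reconstructed by hand, so treat as illustrative):
#   rename("encoder.patch_embed.proj.weight")
#     -> "embeddings.patch_embeddings.projection.weight"        (table hit)
#     -> "swin.embeddings.patch_embeddings.projection.weight"   ("swin." prefix added)
#   rename("decoder.0.weight") -> "decoder.0.weight"            (decoder keys pass through)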
def lowerCAmelCase__ ( a__: Any , a__: Optional[Any] ) -> Tuple:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_UpperCAmelCase = orig_state_dict.pop(A__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
_UpperCAmelCase = key.split('.' )
_UpperCAmelCase = int(key_split[2] )
_UpperCAmelCase = int(key_split[4] )
_UpperCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_UpperCAmelCase = val[:dim, :]
_UpperCAmelCase = val[
dim : dim * 2, :
]
_UpperCAmelCase = val[-dim:, :]
else:
_UpperCAmelCase = val[
:dim
]
_UpperCAmelCase = val[
dim : dim * 2
]
_UpperCAmelCase = val[
-dim:
]
else:
_UpperCAmelCase = val
return orig_state_dict
def lowerCAmelCase__ ( a__: Tuple , a__: str , a__: Tuple , a__: List[str] ) -> str:
'''simple docstring'''
_UpperCAmelCase = torch.load(A__ , map_location='cpu' )['model']
_UpperCAmelCase = get_swin_config(A__ )
_UpperCAmelCase = SwinForMaskedImageModeling(A__ )
model.eval()
_UpperCAmelCase = convert_state_dict(A__ , A__ )
model.load_state_dict(A__ )
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = ViTImageProcessor(size={'height': 1_9_2, 'width': 1_9_2} )
_UpperCAmelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
_UpperCAmelCase = image_processor(images=A__ , return_tensors='pt' )
with torch.no_grad():
_UpperCAmelCase = model(**A__ ).logits
print(outputs.keys() )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print(F'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(F'''microsoft/{model_name}''' )
image_processor.push_to_hub(F'''microsoft/{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase__ :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ :Optional[int] = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 618
|
from sklearn.metrics import matthews_corrcoef
import datasets
a__ : Any = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
a__ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
a__ : str = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32'),
'references': datasets.Value('int32'),
}) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any]=None) -> Any:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(lowerCAmelCase , lowerCAmelCase , sample_weight=lowerCAmelCase)),
}
| 622
| 0
|
'''simple docstring'''
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
_enforce_args(__UpperCAmelCase , __UpperCAmelCase )
if n == 0:
return 0
lowerCamelCase_ : Optional[Any] = float('''-inf''' )
for i in range(1 , n + 1 ):
lowerCamelCase_ : Any = max(
__UpperCAmelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , __UpperCAmelCase ) )
return max_revue
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
_enforce_args(__UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase_ : Optional[Any] = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowerCamelCase_ : Optional[int] = float('''-inf''' )
for i in range(1 , n + 1 ):
lowerCamelCase_ : int = max(
__UpperCAmelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __UpperCAmelCase , __UpperCAmelCase ) , )
lowerCamelCase_ : Dict = max_revenue
return max_rev[n]
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
_enforce_args(__UpperCAmelCase , __UpperCAmelCase )
# length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
lowerCamelCase_ : Optional[Any] = [float('''-inf''' ) for _ in range(n + 1 )]
lowerCamelCase_ : Optional[int] = 0
for i in range(1 , n + 1 ):
lowerCamelCase_ : Optional[int] = max_rev[i]
for j in range(1 , i + 1 ):
lowerCamelCase_ : int = max(__UpperCAmelCase , prices[j - 1] + max_rev[i - j] )
lowerCamelCase_ : Dict = max_revenue_i
return max_rev[n]
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
if n < 0:
lowerCamelCase_ : Any = F"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(__UpperCAmelCase )
if n > len(__UpperCAmelCase ):
lowerCamelCase_ : Optional[int] = (
'''Each integral piece of rod must have a corresponding price. '''
F"""Got n = {n} but length of prices = {len(__UpperCAmelCase )}"""
)
raise ValueError(__UpperCAmelCase )
def __snake_case ():
"""simple docstring"""
lowerCamelCase_ : Dict = [6, 10, 12, 15, 20, 23]
lowerCamelCase_ : Tuple = len(__UpperCAmelCase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
lowerCamelCase_ : Any = 36
lowerCamelCase_ : Dict = top_down_cut_rod(__UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase_ : Optional[Any] = bottom_up_cut_rod(__UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase_ : Dict = naive_cut_rod_recursive(__UpperCAmelCase , __UpperCAmelCase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
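# The recurrence all three implementations share, with a small worked case
# (prices are indexed from rod length 1; the values below are illustrative):
#   r(0) = 0,  r(n) = max over 1 <= i <= n of ( prices[i - 1] + r(n - i) )
# For prices = [1, 5, 8, 9]:
#   r(1) = 1
#   r(2) = max(1 + 1, 5)               = 5
#   r(3) = max(1 + 5, 5 + 1, 8)        = 8
#   r(4) = max(1 + 8, 5 + 5, 8 + 1, 9) = 10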
| 418
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowerCamelCase : Union[str, Any] = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__UpperCAmelCase )
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
lowerCamelCase_ : Tuple = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__UpperCAmelCase , id=__UpperCAmelCase )
| 418
| 1
|
import os
def lowerCamelCase__ ( __A :str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(__A ) ,__A ) ) as input_file:
__snake_case = [
[int(element ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
__snake_case = len(__A )
__snake_case = len(matrix[0] )
__snake_case = [[-1 for _ in range(__A )] for _ in range(__A )]
for i in range(__A ):
__snake_case = matrix[i][0]
for j in range(1 ,__A ):
for i in range(__A ):
__snake_case = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 ,__A ):
__snake_case = min(
minimal_path_sums[i][j] ,minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 ,-1 ,-1 ):
__snake_case = min(
minimal_path_sums[i][j] ,minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
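# A worked 3x3 case for the up/down/right dynamic programme above (start in any
# left-column cell, end in any right-column cell):
#   matrix = [[1, 2, 3],
#             [4, 5, 6],
#             [7, 8, 9]]
# column minima after relaxation: [1, 4, 7] -> [3, 8, 15] -> [6, 12, 21]
# answer = min(last column) = 6, via the path 1 -> 2 -> 3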
| 268
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCamelCase__ = re.compile(r'''\s+''')
def lowerCamelCase__ ( __A :int ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__A ,"""""" ,example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def lowerCamelCase__ ( __A :Any ):
"""simple docstring"""
__snake_case = [len(line ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(__A ), "line_max": max(__A )}
def lowerCamelCase__ ( __A :Any ):
"""simple docstring"""
__snake_case = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def lowerCamelCase__ ( __A :str ,__A :int ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def lowerCamelCase__ ( __A :Optional[int] ,__A :int=5 ):
"""simple docstring"""
__snake_case = ["""auto-generated""", """autogenerated""", """automatically generated"""]
__snake_case = example["""content"""].splitlines()
for _, line in zip(range(__A ) ,__A ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def lowerCamelCase__ ( __A :int ,__A :int=5 ,__A :str=0.05 ):
"""simple docstring"""
__snake_case = ["""unit tests""", """test file""", """configuration file"""]
__snake_case = example["""content"""].splitlines()
__snake_case = 0
__snake_case = 0
# first test
for _, line in zip(range(__A ) ,__A ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
__snake_case = example["""content"""].count("""\n""" )
__snake_case = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def lowerCamelCase__ ( __A :Dict ):
"""simple docstring"""
__snake_case = ["""def """, """class """, """for """, """while """]
__snake_case = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def lowerCamelCase__ ( __A :Dict ,__A :List[str]=4 ):
"""simple docstring"""
__snake_case = example["""content"""].splitlines()
__snake_case = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def lowerCamelCase__ ( __A :str ):
"""simple docstring"""
__snake_case = tokenizer(example["""content"""] ,truncation=__A )["""input_ids"""]
__snake_case = len(example["""content"""] ) / len(__A )
return {"ratio": ratio}
def lowerCamelCase__ ( __A :Optional[Any] ):
"""simple docstring"""
__snake_case = {}
results.update(get_hash(__A ) )
results.update(line_stats(__A ) )
results.update(alpha_stats(__A ) )
results.update(char_token_ratio(__A ) )
results.update(is_autogenerated(__A ) )
results.update(is_config_or_test(__A ) )
results.update(has_no_keywords(__A ) )
results.update(has_few_assignments(__A ) )
return results
def lowerCamelCase__ ( __A :int ,__A :str ,__A :Dict ):
"""simple docstring"""
if not check_uniques(__A ,__A ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def lowerCamelCase__ ( __A :str ):
"""simple docstring"""
with open(__A ,"""rb""" ) as f_in:
with gzip.open(str(__A ) + """.gz""" ,"""wb""" ,compresslevel=6 ) as f_out:
shutil.copyfileobj(__A ,__A )
os.unlink(__A )
# Settings
UpperCamelCase__ = HfArgumentParser(PreprocessingArguments)
UpperCamelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCamelCase__ = multiprocessing.cpu_count()
UpperCamelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCamelCase__ = time.time()
UpperCamelCase__ = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
UpperCamelCase__ = time.time()
UpperCamelCase__ = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
UpperCamelCase__ = set(ds.unique('''hash'''))
UpperCamelCase__ = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
UpperCamelCase__ = time.time()
UpperCamelCase__ = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCamelCase__ = time.time()
UpperCamelCase__ ,UpperCamelCase__ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
UpperCamelCase__ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
UpperCamelCase__ = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
UpperCamelCase__ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCamelCase__ = str(data_dir / F'file-{file_number+1:012}.json')
UpperCamelCase__ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}')
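# The exact-dedup stage above keys on an MD5 of whitespace-stripped content
# (`mda` is the dataset's scrambled spelling of `md5`). A minimal, self-contained
# sketch of that idea -- first occurrence wins; names are assumptions:
import hashlib
import re

WHITESPACE = re.compile(r"\s+")

def content_hash(text):
    # Hash with all whitespace runs removed, mirroring get_hash above.
    return hashlib.md5(WHITESPACE.sub("", text).encode("utf-8")).hexdigest()

def exact_dedup(examples):
    seen, kept = set(), []
    for text in examples:
        h = content_hash(text)
        if h not in seen:
            seen.add(h)
            kept.append(text)
    return kept

print(exact_dedup(["a = 1", "a  =  1", "b = 2"]))  # ['a = 1', 'b = 2']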
| 268
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_UpperCAmelCase : List[Any] =logging.get_logger(__name__)
_UpperCAmelCase : List[str] ={"""vocab_file""": """spiece.model"""}
_UpperCAmelCase : List[Any] ={
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , __lowercase = None , **__lowercase , ) -> None:
lowerCAmelCase_ : str = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
lowerCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
lowerCAmelCase_ : str = 3
lowerCAmelCase_ : Tuple = do_lower_case
lowerCAmelCase_ : Any = remove_space
lowerCAmelCase_ : Optional[int] = keep_accents
lowerCAmelCase_ : Any = vocab_file
lowerCAmelCase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowercase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
lowerCAmelCase_ : Optional[int] = jieba
lowerCAmelCase_ : Any = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def lowercase_ ( self ) -> List[Any]:
return len(self.sp_model )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Tuple = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = self.__dict__.copy()
lowerCAmelCase_ : List[str] = None
return state
def __setstate__( self , __lowercase ) -> Tuple:
lowerCAmelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self , __lowercase ) -> Optional[int]:
if self.remove_space:
lowerCAmelCase_ : str = ''' '''.join(inputs.strip().split() )
else:
lowerCAmelCase_ : Optional[Any] = inputs
lowerCAmelCase_ : Dict = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
lowerCAmelCase_ : List[Any] = unicodedata.normalize('''NFKD''' , __lowercase )
lowerCAmelCase_ : List[Any] = ''''''.join([c for c in outputs if not unicodedata.combining(__lowercase )] )
if self.do_lower_case:
lowerCAmelCase_ : Dict = outputs.lower()
return outputs
def lowercase_ ( self , __lowercase ) -> List[str]:
lowerCAmelCase_ : List[str] = self.preprocess_text(__lowercase )
lowerCAmelCase_ : Optional[Any] = self.sp_model.encode(__lowercase , out_type=__lowercase )
lowerCAmelCase_ : List[str] = []
for piece in pieces:
if len(__lowercase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowerCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowercase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase_ : Optional[int] = cur_pieces[1:]
else:
lowerCAmelCase_ : List[str] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowercase )
else:
new_pieces.append(__lowercase )
return new_pieces
def lowercase_ ( self , __lowercase ) -> int:
return self.sp_model.PieceToId(__lowercase )
def lowercase_ ( self , __lowercase ) -> int:
return self.sp_model.IdToPiece(__lowercase )
def lowercase_ ( self , __lowercase ) -> Tuple:
lowerCAmelCase_ : Any = ''''''.join(__lowercase ).replace(__lowercase , ''' ''' ).strip()
return out_string
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : List[Any] = [self.sep_token_id]
lowerCAmelCase_ : str = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
if token_ids_a is not None:
return ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) + [1, 1]
return ([0] * len(__lowercase )) + [1, 1]
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Optional[int] = [self.sep_token_id]
lowerCAmelCase_ : str = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not os.path.isdir(__lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ : Any = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowercase , '''wb''' ) as fi:
lowerCAmelCase_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__lowercase )
return (out_vocab_file,)
def lowercase_ ( self , *__lowercase , **__lowercase ) -> Optional[Any]:
lowerCAmelCase_ : Tuple = super()._decode(*__lowercase , **__lowercase )
lowerCAmelCase_ : Any = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
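# The CPM tokenizer maps spaces and newlines to the placeholder glyphs ▂ / ▃
# before SentencePiece sees the text, and _decode above reverses that mapping.
# A self-contained round trip of just that translation step (a sketch; the real
# pipeline also runs jieba segmentation, omitted here):
encode_table = str.maketrans(" \n", "\u2582\u2583")

text = "hello world\nbye"
encoded = text.translate(encode_table)  # 'hello▂world▃bye'
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == text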
| 702
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_UpperCAmelCase : Dict =None
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
_UpperCAmelCase : Any ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Any ={
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
_UpperCAmelCase : Dict ={
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
_UpperCAmelCase : Tuple ="""▁"""
# Segments (not really needed)
_UpperCAmelCase : str =0
_UpperCAmelCase : List[str] =1
_UpperCAmelCase : int =2
_UpperCAmelCase : Any =3
_UpperCAmelCase : List[Any] =4
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Any = """left"""
SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
lowerCAmelCase_ : List[Any] = 3
lowerCAmelCase_ : Dict = do_lower_case
lowerCAmelCase_ : Dict = remove_space
lowerCAmelCase_ : List[str] = keep_accents
lowerCAmelCase_ : List[str] = vocab_file
lowerCAmelCase_ : str = False if not self.vocab_file else True
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Tuple = [self.sep_token_id]
lowerCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ : str = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,)
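# XLNet, unlike BERT, appends its special tokens at the end; as reconstructed from
# the two methods above, the layouts they produce are:
#   single sequence : tokens_a + ['<sep>', '<cls>']
#     token_type_ids: [0] * (len(tokens_a) + 1) + [2]
#   sequence pair   : tokens_a + ['<sep>'] + tokens_b + ['<sep>', '<cls>']
#     token_type_ids: [0] * (len(tokens_a) + 1) + [1] * (len(tokens_b) + 1) + [2]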
| 619
| 0
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight: W = W0 + alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
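# Example invocation (a sketch; the paths are placeholders and the script filename is
# an assumption, not something defined in this file):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged_pipeline \
#       --alpha 0.75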
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed scheduled (daily) CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content of the last completed scheduled (daily) CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
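# Usage sketch (requires a GitHub token with read access; the artifact name below is a
# hypothetical example, not guaranteed to exist on a given run):
#
#   reports = get_last_daily_ci_reports(
#       artifact_names=["ci_results"], output_dir="./ci_artifacts", token=os.environ["GITHUB_TOKEN"]
#   )
#   # `reports` maps artifact name -> {filename -> decoded file content}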
def circle_sort(collection: list) -> list:
    """Sort a list in ascending order using the circle sort algorithm."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """One recursive pass over collection[low:high + 1]; returns True if anything was swapped."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
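# A quick sanity check (runnable as a doctest against the function above):
#
#   >>> circle_sort([5, 3, 1, 4, 2])
#   [1, 2, 3, 4, 5]
#   >>> circle_sort([])
#   []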
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
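# Worked example: over 4 days there are 3**4 = 81 attendance strings; 33 of them contain
# two or more absences and 5 contain three consecutive lates (the two sets are disjoint
# at this length), leaving 43 prize strings:
#
#   >>> solution(4)
#   43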
def topological_sort(graph: dict) -> None:
    """Kahn's Algorithm: print a topological ordering of the graph, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    # count incoming edges for every vertex
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # start from the vertices with no incoming edges
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
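# For the graph above, Kahn's algorithm dequeues zero-in-degree vertices in FIFO order,
# so running this script prints:
#
#   [0, 1, 2, 3, 4, 5]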
def base16_encode(data: bytes) -> str:
    """Encode bytes into an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
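# Round-trip sketch (RFC 3548 uppercase alphabet):
#
#   >>> base16_encode(b"Hello World!")
#   '48656C6C6F20576F726C6421'
#   >>> base16_decode("48656C6C6F20576F726C6421")
#   b'Hello World!'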
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , lowerCamelCase , lowerCamelCase=3 , lowerCamelCase=32 , lowerCamelCase=3 , lowerCamelCase=10 , lowerCamelCase=[8, 16, 32, 64] , lowerCamelCase=[1, 1, 2, 1] , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="relu" , lowerCamelCase=3 , lowerCamelCase=None , lowerCamelCase=["stage2", "stage3", "stage4"] , lowerCamelCase=[2, 3, 4] , lowerCamelCase=1 , ):
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = embeddings_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_act
_snake_case = num_labels
_snake_case = scope
_snake_case = len(lowerCamelCase )
_snake_case = out_features
_snake_case = out_indices
_snake_case = num_groups
def UpperCamelCase( self ):
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_snake_case = BitModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_snake_case = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_snake_case = self.num_labels
_snake_case = BitForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_snake_case = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_snake_case = BitBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_snake_case = model(lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case = None
_snake_case = BitBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_snake_case = model(lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase( self ):
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Dict = False
def UpperCamelCase( self ):
_snake_case = BitModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def UpperCamelCase( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase( self ):
return
@unittest.skip(reason="Bit does not output attentions" )
def UpperCamelCase( self ):
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def UpperCamelCase( self ):
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCamelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def UpperCamelCase( self ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCamelCase( self ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase )
def UpperCamelCase( self ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(config=lowerCamelCase )
for name, module in model.named_modules():
if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def UpperCamelCase( self ):
def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_snake_case = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_snake_case = layer_type
_snake_case = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = BitModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def snake_case_ ( ):
'''simple docstring'''
_snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase( self ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def UpperCamelCase( self ):
_snake_case = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowerCamelCase )
# verify the logits
_snake_case = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
_snake_case = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ : str = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase__ : Any = BitConfig
UpperCAmelCase__ : Optional[Any] = False
def UpperCamelCase( self ):
_snake_case = BitModelTester(self )
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
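# Minimal sketch of calling the (deprecated) helpers with numpy arrays:
#
#   import numpy as np
#   preds, labels = np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1])
#   metrics = glue_compute_metrics("mrpc", preds, labels)
#   # -> acc 0.75, f1 0.8, acc_and_f1 0.775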
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Return the next number of the squared-digit chain."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89


def chain(number: int) -> bool:
    """Generate and memoize the chain: True if it ends with 1, False if it ends with 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Count the starting numbers below `number` whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
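# Sanity check (under the count(False) reconstruction above): of the numbers 1..10,
# only 1, 7 and 10 reach 1, so seven of the ten chains end at 89:
#
#   >>> solution(10)
#   7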
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
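# Usage sketch: the config object is plain data, so it can be built and inspected offline:
#
#   >>> config = RobertaConfig(num_hidden_layers=6)
#   >>> (config.num_hidden_layers, config.vocab_size)
#   (6, 50265)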
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
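# Usage sketch: `attribute_map` lets generic code read decoder-specific fields under
# common names, e.g.:
#
#   >>> config = Speech2Text2Config(decoder_attention_heads=8)
#   >>> config.num_attention_heads  # resolved via attribute_map
#   8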
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
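# Net effect of the lazy-module pattern (an illustration, not code from this file):
#
#   >>> from transformers import ASTConfig   # loads only the configuration module
#   >>> from transformers import ASTModel    # triggers the torch-dependent modeling import
#
# so `import transformers` itself stays cheap.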
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our CONDITIONAL_DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
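# Example invocation (a sketch; the script filename is an assumption, not defined here):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50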
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ....utils import logging
_A = logging.get_logger(__name__)
class MMBTConfig:
    """Configuration class to store the configuration of an MMBT model."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
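# Usage sketch (assuming transformers' BertConfig as the wrapped text config):
#
#   >>> from transformers import BertConfig
#   >>> mmbt_config = MMBTConfig(BertConfig(), num_labels=2, modal_hidden_size=2048)
#   >>> mmbt_config.num_labels
#   2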
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block: self-attention, optional cross-attention and a feed-forward
    network, each preceded by its own normalization layer.
    """

    def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", final_dropout: bool = False):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(self, hidden_states: torch.FloatTensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, timestep: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, class_labels: Optional[torch.LongTensor] = None):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    r"""A feed-forward layer with a configurable gated/plain GELU projection."""

    def __init__(self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation function, with optional tanh approximation."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A gated linear unit variant: the projection doubles the width, half of it gating the other half."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""The approximate form of the Gaussian Error Linear Unit: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class lowerCamelCase (nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _snake_case : Dict , _snake_case : Tuple ) -> Any:
super().__init__()
SCREAMING_SNAKE_CASE__ = CombinedTimestepLabelEmbeddings(_snake_case , _snake_case )
SCREAMING_SNAKE_CASE__ = nn.SiLU()
SCREAMING_SNAKE_CASE__ = nn.Linear(_snake_case , 6 * embedding_dim , bias=_snake_case )
SCREAMING_SNAKE_CASE__ = nn.LayerNorm(_snake_case , elementwise_affine=_snake_case , eps=1e-6 )
def lowerCAmelCase_ ( self : Dict , _snake_case : int , _snake_case : List[Any] , _snake_case : Any , _snake_case : str=None ) -> str:
SCREAMING_SNAKE_CASE__ = self.linear(self.silu(self.emb(_snake_case , _snake_case , hidden_dtype=_snake_case ) ) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = emb.chunk(6 , dim=1 )
SCREAMING_SNAKE_CASE__ = self.norm(_snake_case ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class lowerCamelCase (nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : Optional[str] = None , _snake_case : float = 1e-5 ) -> Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE__ = num_groups
SCREAMING_SNAKE_CASE__ = eps
if act_fn is None:
SCREAMING_SNAKE_CASE__ = None
else:
SCREAMING_SNAKE_CASE__ = get_activation(_snake_case )
SCREAMING_SNAKE_CASE__ = nn.Linear(_snake_case , out_dim * 2 )
def lowerCAmelCase_ ( self : Optional[Any] , _snake_case : Any , _snake_case : Optional[Any] ) -> Optional[int]:
if self.act:
SCREAMING_SNAKE_CASE__ = self.act(_snake_case )
SCREAMING_SNAKE_CASE__ = self.linear(_snake_case )
SCREAMING_SNAKE_CASE__ = emb[:, :, None, None]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = emb.chunk(2 , dim=1 )
SCREAMING_SNAKE_CASE__ = F.group_norm(_snake_case , self.num_groups , eps=self.eps )
SCREAMING_SNAKE_CASE__ = x * (1 + scale) + shift
return x
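

# --- usage sketch (illustrative, not part of the original module) ---
# Assuming the FeedForward definition above, a GEGLU feed-forward block
# preserves the trailing feature dimension; the shapes below are made up.
#
#   ff = FeedForward(dim=64, activation_fn="geglu")
#   tokens = torch.randn(2, 16, 64)   # (batch, seq_len, dim)
#   out = ff(tokens)                  # -> torch.Size([2, 16, 64])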
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level tokenizers,
        # so this test is skipped (mostly an issue of adding a space before the string).
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer does not have a padding token by default
    def test_padding_different_model_input_name(self):
        pass
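

# A minimal sketch of the truncation behaviour exercised by test_truncation
# above (assumes Hub access; the checkpoint name is taken from that test):
#
#   tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer.encode("result = a\n\n\n# trailing comment")
#   # decoding stops at the first matching pattern, dropping the trailing comment
#   tokenizer.decode(ids, truncate_before_pattern=["^#", "\n\n\n"])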
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)

    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
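
# Example invocation (the script filename, run id, and token are hypothetical;
# the flags come from the parser above and the token needs actions:read scope):
#
#   python extract_warnings.py --workflow_run_id 1234567890 \
#       --output_dir ./warnings_artifacts --token $GITHUB_TOKEN \
#       --targets DeprecationWarning,UserWarning,FutureWarning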
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
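

# A minimal pipeline sketch mirroring test_small_model_pt above (the model name
# is the tiny test checkpoint from that test; the waveform is a plain 1-D array):
#
#   classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
#   waveform = np.ones((8000,), dtype=np.float32)
#   classifier(waveform, top_k=2)   # -> [{"score": ..., "label": ...}, ...]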
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with the given number of vertices; each possible
    edge is added independently with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) graph with vertices_number vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
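
    # Example (outputs depend on the RNG seed; the random_graph result shown
    # is illustrative, the complete_graph result is deterministic):
    #
    #   random.seed(1)
    #   random_graph(4, 0.5)   # e.g. {0: [1], 1: [0, 3], 2: [], 3: [1]}
    #   complete_graph(3)      # {0: [1, 2], 1: [0, 2], 2: [0, 1]}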
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_pipeline_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_pipeline_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_pipeline_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
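

# A minimal end-to-end sketch of the pipeline exercised above (requires a GPU
# and Hub access; the checkpoint, image URL, and prompt come from the slow tests):
#
#   pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#       "timbrooks/instruct-pix2pix", safety_checker=None
#   ).to("cuda")
#   image = load_image(
#       "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
#   )
#   edited = pipe("turn him into a cyborg", image=image, num_inference_steps=10).images[0]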
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the greatest common divisor recursively (Euclid's algorithm)."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Calculate the greatest common divisor iteratively."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    """Call the GCD functions on user input."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
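
    # Quick worked example - both implementations agree:
    #   greatest_common_divisor(24, 36)   # -> 12
    #   gcd_by_iterative(24, 36)          # -> 12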
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected height and width after the shortest-edge resizing
        rule used by DetrImageProcessor."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
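

# A minimal usage sketch of the image processor under test (requires Hub access;
# the checkpoint name and fixture path come from the slow tests above):
#
#   image_processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   encoding = image_processor(images=image, return_tensors="pt")
#   encoding["pixel_values"].shape   # -> torch.Size([1, 3, 800, 1066])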
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    """
    Wrapper class for all the attributes and features that can be changed when
    loading a model with `bitsandbytes` quantization.
    """

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Safety checker that the arguments have valid types."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        """Return True if the model is quantizable, False otherwise."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Return the quantization method used, or None if the model is not quantizable."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff=True):
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self):
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
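

# A minimal usage sketch (public transformers API; the model name is only an
# example): load a causal LM in 4-bit NF4 with bfloat16 compute.
#
#   from transformers import AutoModelForCausalLM
#   quantization_config = BitsAndBytesConfig(
#       load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="bfloat16"
#   )
#   model = AutoModelForCausalLM.from_pretrained(
#       "facebook/opt-350m", quantization_config=quantization_config
#   )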
# Lint as: python3
import itertools
import os
import re
_lowercase = re.compile(r'''([A-Z]+)([A-Z][a-z])''')
_lowercase = re.compile(r'''([a-z\d])([A-Z])''')
_lowercase = re.compile(r'''(?<!_)_(?!_)''')
_lowercase = re.compile(r'''(_{2,})''')
_lowercase = r'''^\w+(\.\w+)*$'''
_lowercase = r'''<>:/\|?*'''
def _A (UpperCamelCase : str ) ->str:
'''simple docstring'''
lowerCamelCase__ : List[str] = _uppercase_uppercase_re.sub(r"""\1_\2""" , UpperCamelCase )
lowerCamelCase__ : Optional[int] = _lowercase_uppercase_re.sub(r"""\1_\2""" , UpperCamelCase )
return name.lower()
def _A (UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
lowerCamelCase__ : Optional[int] = _single_underscore_re.split(UpperCamelCase )
lowerCamelCase__ : int = [_multiple_underscores_re.split(UpperCamelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(UpperCamelCase ) if n != """""" )
def _A (UpperCamelCase : Any ) ->Optional[Any]:
'''simple docstring'''
if os.path.basename(UpperCamelCase ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(UpperCamelCase )
def filename_prefix_for_split(name: str, split: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None) -> str:
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
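# Usage sketch (illustrative, not part of the original module):
#   camelcase_to_snakecase("MyDatasetName")    -> "my_dataset_name"
#   snakecase_to_camelcase("my_dataset_name")  -> "MyDatasetName"
#   filenames_for_dataset_split("/data", "squad", "train", "arrow", shard_lengths=[100, 100])
#   -> ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]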
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    '''Return True if number is a perfect square.'''
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    '''Return the reduced sum x + y + z of three fractions given as num/den pairs.'''
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
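# Worked example (illustrative): for x = 1/2 and y = 1/3 the n=1 branch yields
# z = 5/6, and add_three(1, 2, 1, 3, 5, 6) reduces 1/2 + 1/3 + 5/6 to (5, 3),
# i.e. 5/3, which is the tuple stored in unique_s.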
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    '''Return True if n reads the same forwards and backwards.'''
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    '''Sum the numbers below n that are palindromic in both base 10 and base 2.'''
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
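# Worked example (illustrative): 585 is counted, since "585" is a palindrome
# and bin(585) == "0b1001001001", whose digit string "1001001001" is too.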
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
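# Example invocation (illustrative script name and paths; the two arguments are positional):
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       /checkpoints/vqa_pre_trained.th /models/visualbert-vqa-pre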
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
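# Usage sketch (illustrative): with the lazy module installed in sys.modules,
#   from transformers.models.unispeech import UniSpeechModel
# only imports modeling_unispeech (and hence torch) when the attribute is
# first accessed, keeping the top-level `import transformers` cheap.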
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester:
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
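# Running this file (illustrative command; the exact path depends on the checkout):
#   python -m pytest tests/models/mobilebert/test_modeling_tf_mobilebert.py -k mobilebert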
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
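# Usage sketch (illustrative; model, tokenizer and texts are hypothetical):
#   pipe = PairClassificationPipeline(model=model, tokenizer=tokenizer)
#   pipe("I love this.", second_text="Is the sentiment positive?")
#   -> {"label": "...", "score": 0.97, "logits": [...]}
# The softmax helper itself: softmax(np.array([1.0, 2.0, 3.0]))
# ~ [0.090, 0.245, 0.665], and the three values sum to 1.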
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
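# CLI usage sketch (illustrative config path): the same entry point backs
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml
# which launches test_script.py through `accelerate-launch` with that config.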
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
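# Worked example: for n = 10 the sum of squares is 385 and the square of the
# sum is 55**2 = 3025, so solution(10) == 2640.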
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
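# Example invocation (illustrative paths; see scripts/fsmt/ for the full workflow):
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path /data/wmt19.ru-en.ensemble/model4.pt \
#       --pytorch_dump_folder_path /data/fsmt-wmt19-ru-en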
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n, via a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]  # 0 means "assumed prime"
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'{solution() = }')
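# Worked example: solution(10) == 17, since the primes below 10 are 2, 3, 5 and 7.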
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected (height, width) after resizing so that the
        # shorter side matches size["shortest_edge"].
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
def UpperCAmelCase__ ( self : str )->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
__lowerCAmelCase : List[Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__lowerCAmelCase : Optional[int] = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
__lowerCAmelCase : str = image_processing(images=snake_case_ , annotations=snake_case_ , masks_path=snake_case_ , return_tensors="""pt""" )
# verify pixel values
__lowerCAmelCase : Dict = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , snake_case_ )
__lowerCAmelCase : Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , snake_case_ , atol=1E-4 ) )
# verify area
__lowerCAmelCase : Dict = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , snake_case_ ) )
# verify boxes
__lowerCAmelCase : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , snake_case_ )
__lowerCAmelCase : List[str] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , snake_case_ , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , snake_case_ ) )
# verify is_crowd
__lowerCAmelCase : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , snake_case_ ) )
# verify class_labels
__lowerCAmelCase : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , snake_case_ ) )
# verify masks
__lowerCAmelCase : Optional[Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , snake_case_ )
# verify orig_size
__lowerCAmelCase : List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , snake_case_ ) )
# verify size
__lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , snake_case_ ) )
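# Note: the box tensors asserted above are in normalized (center_x, center_y, width,
# height) format, which is what the image processor emits. A minimal sketch of that
# conversion from a COCO-style [x_min, y_min, width, height] box (the numbers in the
# usage comment below are hypothetical, not taken from the fixtures):
def coco_xywh_to_normalized_cxcywh(box: list, img_width: int, img_height: int) -> list:
    x_min, y_min, w, h = box
    return [
        (x_min + w / 2) / img_width,  # normalized center_x
        (y_min + h / 2) / img_height,  # normalized center_y
        w / img_width,  # normalized width
        h / img_height,  # normalized height
    ]


# e.g. coco_xywh_to_normalized_cxcywh([10, 20, 30, 40], 640, 480)
# -> [0.0390625, 0.0833..., 0.046875, 0.0833...]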
| 720
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |  7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ---- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'  | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'  | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
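# A minimal usage sketch for the tokenizer above (assumes a local SentencePiece model
# file; the path below is hypothetical). It illustrates the fairseq alignment: the
# reserved tokens map to ids 0-3, and every SentencePiece id is shifted by
# fairseq_offset (= 1).
if __name__ == "__main__":
    tokenizer = XGLMTokenizer("sentencepiece.bpe.model")
    assert tokenizer._convert_token_to_id("<s>") == 0
    assert tokenizer._convert_token_to_id("<pad>") == 1
    # A regular piece with SentencePiece id `k` comes out as `k + 1`:
    piece = tokenizer._tokenize("hello")[0]
    assert tokenizer._convert_token_to_id(piece) == tokenizer.sp_model.PieceToId(piece) + 1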
| 240
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 76
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
__A = """"""
__A = """"""
__A = """"""
__A = """"""
def __A (_SCREAMING_SNAKE_CASE ) ->None:
"""simple docstring"""
lowerCAmelCase__ :Any = tweepy.OAuthHandler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
auth.set_access_token(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[Any] = tweepy.API(_SCREAMING_SNAKE_CASE )
# initialize a list to hold all the tweepy Tweets
lowerCAmelCase__ :Union[str, Any] = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowerCAmelCase__ :Optional[Any] = api.user_timeline(screen_name=_SCREAMING_SNAKE_CASE , count=200 )
# save most recent tweets
alltweets.extend(_SCREAMING_SNAKE_CASE )
# save the id of the oldest tweet less one
lowerCAmelCase__ :Tuple = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_SCREAMING_SNAKE_CASE ) > 0:
print(F"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
lowerCAmelCase__ :Union[str, Any] = api.user_timeline(
screen_name=_SCREAMING_SNAKE_CASE , count=200 , max_id=_SCREAMING_SNAKE_CASE )
# save most recent tweets
alltweets.extend(_SCREAMING_SNAKE_CASE )
# update the id of the oldest tweet less one
lowerCAmelCase__ :Tuple = alltweets[-1].id - 1
print(F"...{len(_SCREAMING_SNAKE_CASE )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
lowerCAmelCase__ :Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"new_{screen_name}_tweets.csv" , 'w' ) as f:
lowerCAmelCase__ :List[str] = csv.writer(_SCREAMING_SNAKE_CASE )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(_SCREAMING_SNAKE_CASE )
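# Note: the manual max_id loop above can also be expressed with tweepy's Cursor helper,
# which does the same pagination bookkeeping. A minimal sketch using the same
# credentials (the user_timeline endpoint is still capped at roughly the most recent
# 3200 tweets):
def get_all_tweets_with_cursor(screen_name: str) -> list:
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    return [
        [tweet.id_str, tweet.created_at, tweet.text]
        for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
    ]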
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 93
| 0
|
def solution() -> str:
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
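# An equivalent formulation that never materializes the ~3000-digit sum: keep only the
# last ten digits with three-argument pow (a sketch; zfill preserves leading zeros that
# plain string conversion of the reduced sum could drop):
def solution_modular() -> str:
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, 1001)) % mod).zfill(10)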
if __name__ == "__main__":
print(solution())
| 423
|
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
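# The same computation expressed with functools.reduce and the stdlib gcd (a sketch;
# for n = 20 both versions return 232792560):
def solution_reduce(n: int = 20) -> int:
    from functools import reduce
    from math import gcd

    return reduce(lambda a, b: a * b // gcd(a, b), range(1, n + 1))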
if __name__ == "__main__":
print(F"{solution() = }")
| 423
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
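# The `_LazyModule` above defers the heavy torch imports until an attribute is first
# accessed. A minimal sketch of the same idea with only the stdlib (PEP 562 module
# `__getattr__`; `_import_structure` maps submodule name -> exported names, as here):
#
#     import importlib
#
#     def __getattr__(name):
#         for module_name, exported in _import_structure.items():
#             if name in exported:
#                 module = importlib.import_module("." + module_name, __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")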
| 54
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 225
| 0
|
'''simple docstring'''
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
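# Quick sanity check of the fixed implementation, mirroring the usual doctest for this
# algorithm (illustrative; runs at import time):
assert pigeon_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]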
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase = input("""Enter numbers separated by comma:\n""")
_lowercase = [int(x) for x in user_input.split(""",""")]
print(pigeon_sort(unsorted))
| 716
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
def __init__( self : Dict ) -> Optional[Any]:
__magic_name__: Dict = {}
def lowerCamelCase__ ( self : str , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any]=1 ) -> Optional[Any]:
if self.graph.get(_a ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
__magic_name__: List[Any] = [[w, v]]
if not self.graph.get(_a ):
__magic_name__: Optional[int] = []
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
return list(self.graph )
def lowerCamelCase__ ( self : str , __snake_case : Tuple , __snake_case : List[Any] ) -> Dict:
if self.graph.get(_a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_a )
def lowerCamelCase__ ( self : Tuple , __snake_case : List[str]=-2 , __snake_case : str=-1 ) -> Dict:
if s == d:
return []
__magic_name__: Dict = []
__magic_name__: Dict = []
if s == -2:
__magic_name__: List[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: Dict = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_a ) != 0:
__magic_name__: Optional[int] = stack[len(_a ) - 1]
else:
__magic_name__: Dict = ss
# check if we have reached the starting point
if len(_a ) == 0:
return visited
def lowerCamelCase__ ( self : str , __snake_case : Optional[Any]=-1 ) -> Optional[Any]:
if c == -1:
__magic_name__: Optional[int] = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
__magic_name__: Dict = floor(random() * c ) + 1
if n != i:
self.add_pair(_a , _a , 1 )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Any=-2 ) -> Tuple:
__magic_name__: Dict = deque()
__magic_name__: Optional[Any] = []
if s == -2:
__magic_name__: Any = list(self.graph )[0]
d.append(_a )
visited.append(_a )
while d:
__magic_name__: Tuple = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase__ ( self : Tuple , __snake_case : Optional[Any] ) -> List[Any]:
__magic_name__: Dict = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowerCamelCase__ ( self : int , __snake_case : Optional[int] ) -> Tuple:
return len(self.graph[u] )
def lowerCamelCase__ ( self : str , __snake_case : Optional[Any]=-2 ) -> int:
__magic_name__: int = []
__magic_name__: Union[str, Any] = []
if s == -2:
__magic_name__: Optional[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: str = s
__magic_name__: Union[str, Any] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_a ) != 0:
__magic_name__: Tuple = stack[len(_a ) - 1]
else:
__magic_name__: List[str] = ss
# check if we have reached the starting point
if len(_a ) == 0:
return sorted_nodes
def lowerCamelCase__ ( self : str ) -> Dict:
__magic_name__: List[Any] = []
__magic_name__: List[Any] = []
__magic_name__: Optional[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: Union[str, Any] = -2
__magic_name__: Union[str, Any] = []
__magic_name__: List[Any] = s
__magic_name__: Optional[Any] = False
__magic_name__: Tuple = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__magic_name__: str = len(_a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__magic_name__: Optional[Any] = True
if len(_a ) != 0:
__magic_name__: Optional[int] = stack[len(_a ) - 1]
else:
__magic_name__: int = False
indirect_parents.append(_a )
__magic_name__: str = s
__magic_name__: Optional[Any] = ss
# check if we have reached the starting point
if len(_a ) == 0:
return list(_a )
def lowerCamelCase__ ( self : Dict ) -> int:
__magic_name__: List[Any] = []
__magic_name__: Optional[Any] = []
__magic_name__: Optional[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: Any = -2
__magic_name__: Any = []
__magic_name__: Optional[Any] = s
__magic_name__: Optional[int] = False
__magic_name__: str = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__magic_name__: int = len(_a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__magic_name__: int = True
if len(_a ) != 0:
__magic_name__: List[str] = stack[len(_a ) - 1]
else:
__magic_name__: str = False
indirect_parents.append(_a )
__magic_name__: Union[str, Any] = s
__magic_name__: List[str] = ss
# check if we have reached the starting point
if len(_a ) == 0:
return False
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Optional[Any]=-2 , __snake_case : Optional[int]=-1 ) -> Optional[int]:
__magic_name__: Optional[Any] = time()
self.dfs(_a , _a )
__magic_name__: List[str] = time()
return end - begin
def lowerCamelCase__ ( self : List[Any] , __snake_case : int=-2 ) -> str:
__magic_name__: List[str] = time()
self.bfs(_a )
__magic_name__: Optional[Any] = time()
return end - begin
class Graph:
def __init__( self : int ) -> Union[str, Any]:
__magic_name__: List[Any] = {}
def lowerCamelCase__ ( self : List[Any] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Optional[int]=1 ) -> List[str]:
if self.graph.get(_a ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
__magic_name__: str = [[w, v]]
# add the other way
if self.graph.get(_a ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
__magic_name__: str = [[w, u]]
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Any , __snake_case : Optional[Any] ) -> Optional[int]:
if self.graph.get(_a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_a )
# the other way round
if self.graph.get(_a ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_a )
def lowerCamelCase__ ( self : int , __snake_case : List[str]=-2 , __snake_case : Tuple=-1 ) -> Dict:
if s == d:
return []
__magic_name__: Optional[Any] = []
__magic_name__: Any = []
if s == -2:
__magic_name__: Any = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: List[str] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_a ) != 0:
__magic_name__: Optional[Any] = stack[len(_a ) - 1]
else:
__magic_name__: List[Any] = ss
# check if we have reached the starting point
if len(_a ) == 0:
return visited
def lowerCamelCase__ ( self : Any , __snake_case : Dict=-1 ) -> Any:
if c == -1:
__magic_name__: List[str] = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
__magic_name__: Dict = floor(random() * c ) + 1
if n != i:
self.add_pair(_a , _a , 1 )
def lowerCamelCase__ ( self : Tuple , __snake_case : Any=-2 ) -> List[str]:
__magic_name__: Union[str, Any] = deque()
__magic_name__: Union[str, Any] = []
if s == -2:
__magic_name__: Optional[Any] = list(self.graph )[0]
d.append(_a )
visited.append(_a )
while d:
__magic_name__: Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase__ ( self : int , __snake_case : Tuple ) -> Union[str, Any]:
return len(self.graph[u] )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: List[Any] = []
__magic_name__: Any = []
__magic_name__: List[str] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: List[Any] = -2
__magic_name__: Any = []
__magic_name__: Union[str, Any] = s
__magic_name__: Optional[int] = False
__magic_name__: Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__magic_name__: Optional[int] = len(_a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__magic_name__: Any = True
if len(_a ) != 0:
__magic_name__: Dict = stack[len(_a ) - 1]
else:
__magic_name__: Tuple = False
indirect_parents.append(_a )
__magic_name__: Optional[Any] = s
__magic_name__: str = ss
# check if we have reached the starting point
if len(_a ) == 0:
return list(_a )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__: Any = []
__magic_name__: Optional[Any] = []
__magic_name__: Optional[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: List[Any] = -2
__magic_name__: List[str] = []
__magic_name__: Union[str, Any] = s
__magic_name__: List[Any] = False
__magic_name__: str = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__magic_name__: Union[str, Any] = len(_a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__magic_name__: int = True
if len(_a ) != 0:
__magic_name__: Union[str, Any] = stack[len(_a ) - 1]
else:
__magic_name__: List[Any] = False
indirect_parents.append(_a )
__magic_name__: Optional[Any] = s
__magic_name__: Optional[int] = ss
# check if we have reached the starting point
if len(_a ) == 0:
return False
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
return list(self.graph )
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Optional[int]=-2 , __snake_case : Tuple=-1 ) -> Optional[int]:
__magic_name__: Union[str, Any] = time()
self.dfs(_a , _a )
__magic_name__: Dict = time()
return end - begin
def lowerCamelCase__ ( self : Dict , __snake_case : Dict=-2 ) -> Any:
__magic_name__: Optional[Any] = time()
self.bfs(_a )
__magic_name__: Any = time()
return end - begin
| 96
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Flattening of the WGS84 ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
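# Illustrative usage (coordinates for San Francisco and New York; the expected result
# is roughly 4.1e6 meters, stated as a ballpark rather than a reference value):
# >>> SAN_FRANCISCO = (37.774856, -122.424227)
# >>> NEW_YORK = (40.713019, -74.012647)
# >>> lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *NEW_YORK)  # ~4.1e6 m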
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCamelCase : Union[str, Any] =False
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def _a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self ):
'''simple docstring'''
return 12
@property
def _a ( self ):
'''simple docstring'''
return 12
@property
def _a ( self ):
'''simple docstring'''
return 32
@property
def _a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Any = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def _a ( self ):
'''simple docstring'''
__A : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__lowerCamelCase )
@property
def _a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Union[str, Any] = 12
__A : List[Any] = 12
__A : Any = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
        __A : Any = Transformer2DModel(**__lowerCamelCase )
return model
def _a ( self ):
'''simple docstring'''
__A : Optional[int] = 'cpu'
__A : Union[str, Any] = self.dummy_vqvae
__A : Any = self.dummy_text_encoder
__A : int = self.dummy_tokenizer
__A : int = self.dummy_transformer
__A : int = VQDiffusionScheduler(self.num_embed )
__A : int = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowerCamelCase )
__A : Any = VQDiffusionPipeline(
vqvae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , transformer=__lowerCamelCase , scheduler=__lowerCamelCase , learned_classifier_free_sampling_embeddings=__lowerCamelCase , )
__A : Union[str, Any] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : int = 'teddy bear playing in the pool'
__A : Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : Any = pipe([prompt] , generator=__lowerCamelCase , num_inference_steps=2 , output_type='np' )
__A : Dict = output.images
__A : Dict = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : Dict = pipe(
[prompt] , generator=__lowerCamelCase , output_type='np' , return_dict=__lowerCamelCase , num_inference_steps=2 )[0]
__A : List[Any] = image[0, -3:, -3:, -1]
__A : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__A : Optional[int] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self ):
'''simple docstring'''
__A : Union[str, Any] = 'cpu'
__A : int = self.dummy_vqvae
__A : Optional[Any] = self.dummy_text_encoder
__A : Union[str, Any] = self.dummy_tokenizer
__A : Dict = self.dummy_transformer
__A : Optional[Any] = VQDiffusionScheduler(self.num_embed )
__A : Any = LearnedClassifierFreeSamplingEmbeddings(
learnable=__lowerCamelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__A : int = VQDiffusionPipeline(
vqvae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , transformer=__lowerCamelCase , scheduler=__lowerCamelCase , learned_classifier_free_sampling_embeddings=__lowerCamelCase , )
__A : Tuple = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Union[str, Any] = 'teddy bear playing in the pool'
__A : Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : Optional[Any] = pipe([prompt] , generator=__lowerCamelCase , num_inference_steps=2 , output_type='np' )
__A : Any = output.images
__A : Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : Optional[Any] = pipe(
[prompt] , generator=__lowerCamelCase , output_type='np' , return_dict=__lowerCamelCase , num_inference_steps=2 )[0]
__A : str = image[0, -3:, -3:, -1]
__A : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__A : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def _a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ):
'''simple docstring'''
__A : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
__A : Optional[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
__A : Dict = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__A : Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=__lowerCamelCase , output_type='np' , )
__A : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 716
|
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key(key):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
__A : List[Any] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
__A : Tuple = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
__A : List[str] = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
__A : int = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
__A : Optional[Any] = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
__A : Optional[Any] = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__A : Tuple = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
__A : Dict = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
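# Two illustrative checks of the rename rules above, derived by tracing `replace_key`
# (the key strings are made up for the demonstration; they run at import time):
assert replace_key("vqvae.bottleneck.level_blocks.0.k") == "vqvae.bottleneck.level_blocks.0.codebook"
assert replace_key("prior.x_out.weight") == "prior.fc_proj_out.weight"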
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key
        key = replace_key(key)
        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)
    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 237
| 0
|
'''simple docstring'''
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
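# Quick sanity check, mirroring the usual doctest for this algorithm (illustrative;
# runs at import time):
assert circle_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]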
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 309
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # Determines how often (every how many layers) a sparse layer is placed in the encoder.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # Determines how often (every how many layers) a sparse layer is placed in the decoder.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| 485
| 0
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
"""simple docstring"""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x20000 and cp <= 0x2a6df) #
or (cp >= 0x2a700 and cp <= 0x2b73f) #
or (cp >= 0x2b740 and cp <= 0x2b81f) #
or (cp >= 0x2b820 and cp <= 0x2ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2f800 and cp <= 0x2fa1f) #
): #
return True
return False
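# Illustrative sanity checks for the CJK-range test above (run at import time):
assert _is_chinese_char(ord("中")) is True
assert _is_chinese_char(ord("A")) is False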
def is_chinese(word: str):
    """simple docstring"""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """simple docstring"""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """simple docstring"""
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
| 719
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 4096,
"""google/bigbird-roberta-large""": 4096,
"""google/bigbird-base-trivia-itc""": 4096,
}
__UpperCAmelCase = """▁"""
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
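
    # Minimal usage sketch for the methods above (hypothetical token ids; a real
    # checkpoint is required to actually run this):
    #
    #   tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
    #   tok.build_inputs_with_special_tokens([5, 6])          # [CLS] 5 6 [SEP]
    #   tok.build_inputs_with_special_tokens([5, 6], [7, 8])  # [CLS] 5 6 [SEP] 7 8 [SEP]
    #   tok.create_token_type_ids_from_sequences([5, 6], [7, 8])  # [0, 0, 0, 0, 1, 1, 1]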
| 79
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        """simple docstring"""

        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            '''simple docstring'''
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            '''simple docstring'''
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            '''simple docstring'''
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            '''simple docstring'''
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            '''simple docstring'''
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            '''simple docstring'''
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            '''simple docstring'''
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            '''simple docstring'''
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            '''simple docstring'''
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            '''simple docstring'''
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            '''simple docstring'''
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 94
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "vivit"
    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
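
# Back-of-the-envelope check of the tubelet embedding implied by the defaults
# above (a sketch added for illustration, not part of the modeling code): a
# [2, 16, 16] tubelet over 32 frames of 224x224 video gives
# (32 / 2) * (224 / 16) * (224 / 16) = 3136 patches, before any CLS token.
def _num_vivit_patches(image_size=224, num_frames=32, tubelet_size=(2, 16, 16)):
    t, h, w = tubelet_size
    return (num_frames // t) * (image_size // h) * (image_size // w)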
| 602
| 0
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)
        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)
        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data

@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output
    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
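
# Shape of the helper exercised above (a sketch): iflatmap_unordered maps each
# kwargs dict over the pool and flattens every yielded item into one stream,
# in completion order rather than submission order:
#
#   with Pool(2) as pool:
#       sorted(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "a b"}] * 2))
#   # -> ['a', 'a', 'b', 'b']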
| 711
|
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the value at `start` down until the min-heap property holds again.
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp_pos = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_pos
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Bubble the value at `index` up towards the root.
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring tree vertex of the selected vertex
    # Minimum distance of an explored vertex to a neighboring vertex of the
    # partial tree formed so far
    distance_tv = []  # Heap of distances of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
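
    # Worked example (a sketch): for the weighted triangle below, the two
    # cheapest edges are kept and the heaviest one is skipped.
    #
    #   adjacency = defaultdict(list)
    #   for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    #       adjacency[u].append([v, w])
    #       adjacency[v].append([u, w])
    #   prisms_algorithm(adjacency)  # -> [(0, 1), (1, 2)]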
| 303
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
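
# Usage sketch (for illustration; not part of the original file): the defaults
# reproduce the "tiny" layout and expose the aligned backbone fields.
#
#   cfg = ConvNextV2Config()
#   cfg.hidden_sizes  # [96, 192, 384, 768]
#   cfg.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#   cfg.out_features  # ['stage4'] (the last stage, when none are requested)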
| 351
|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    '''simple docstring'''
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    '''simple docstring'''
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function=None, starting_batch_size=128):
    '''simple docstring'''
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
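
# Usage sketch for the decorator above (`train`, `model` and `dataloader` are
# hypothetical): the wrapped function must take the batch size as its first
# argument; the decorator supplies it and halves it whenever it catches an
# out-of-memory error recognized by `should_reduce_batch_size`.
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size, model, dataloader):
#       ...
#
#   train(model, dataloader)  # note: the caller does *not* pass batch_size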
| 486
| 0
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in the original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
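    # Example invocation (a sketch; the .pkl checkpoint comes from the original
    # MaskFormer repository):
    #
    #   python convert_maskformer_checkpoint.py \
    #       --model_name maskformer-swin-tiny-ade \
    #       --checkpoint_path /path/to/model.pkl \
    #       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade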
| 243
|
def solution(limit: int = 28123) -> int:
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a) in abundants for a in abundants):
            res += n

    return res
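
# Sanity check (illustrative): 12 is the smallest abundant number
# (1 + 2 + 3 + 4 + 6 = 16 > 12), so 24 is the smallest integer expressible as a
# sum of two abundant numbers and every n < 24 is counted by `solution`.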
if __name__ == "__main__":
print(solution())
| 243
| 1
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """simple docstring"""
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only so the LightningModule interface is satisfied
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )

    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
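# A minimal sketch (the folder name is a placeholder) showing that a converted
# checkpoint can be reloaded with the standard transformers API:
#
#   from transformers import LongformerForQuestionAnswering
#   model = LongformerForQuestionAnswering.from_pretrained("./longformer_qa_dump")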
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # the custom rjieba PreTokenizer cannot be serialised / retrained, so these
    # common tests are skipped (method names reconstructed; the originals were lost)
    def test_save_slow_from_fast_and_reload_fast(self):
        pass

    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass
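# For context: RoFormerTokenizer pre-segments Chinese text with rjieba before
# applying WordPiece, which is why the company name above splits into whole
# words. A standalone sketch (requires the `rjieba` package; the exact
# segmentation may differ by dictionary version):
#
#   import rjieba
#   print(rjieba.cut("永和服装饰品有限公司"))  # expected to resemble ['永和', '服装', '饰品', '有限公司']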
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
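# A hedged end-to-end sketch of the same processor against the released ALIGN
# checkpoint (network access assumed; `image` is any PIL image you load yourself;
# this is illustrative, not part of the test suite):
#
#   from transformers import AlignProcessor
#   processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#   inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")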
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
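# A minimal usage sketch (standard PretrainedConfig pattern; the override shown
# is illustrative and matches the default anyway):
#
#   from transformers import MarkupLMConfig, MarkupLMModel
#   config = MarkupLMConfig(max_depth=50)  # defaults mirror microsoft/markuplm-base
#   model = MarkupLMModel(config)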
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
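# For context, _LazyModule defers the actual submodule import until an attribute
# is first touched; a sketch of the observable effect (illustrative, not part of
# the module):
#
#   import transformers.models.wav2vec2_with_lm as mod  # cheap: nothing heavy imported yet
#   mod.Wav2Vec2ProcessorWithLM                          # first access triggers the real import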
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape an Amazon.in search results page for a product into a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"
        " (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div", attrs={"class": "s-result-item", "data-component-type": "s-search-result"}
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        # Append the scraped fields as a new row. (The original row-append target
        # and two follow-up " " placeholder assignments were garbled in extraction;
        # appending at the current length is a reasonable reconstruction.)
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a pair of train/eval `DataLoader`s for the GLUE MRPC dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
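# Typical ways to launch this script (the script name is assumed; the flags are
# the ones defined above, and process counts come from your `accelerate config`):
#
#   python gradient_accumulation.py --gradient_accumulation_steps 4
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16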
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
    main()
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """
    Hash map with open addressing (linear probing) and automatic resizing.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """
        Try to add the value to the bucket. If the bucket is empty or holds the
        same key, the value is set; otherwise the collision is unresolved and
        False is returned.
        """
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
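# A short usage sketch of the map above (illustrative, not part of the original):
if __name__ == "__main__":
    hm: HashMap[str, int] = HashMap()
    hm["one"], hm["two"], hm["three"] = 1, 2, 3
    assert len(hm) == 3 and hm["two"] == 2
    del hm["one"]
    assert sorted(hm) == ["three", "two"]  # iteration yields the remaining keys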
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
    import jax

    from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


# We will verify our results on a COCO fixture image
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normal_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        # round-trip: decoding the ids should reproduce the original text
        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normal_text)
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """A pangram contains every letter of the alphabet at least once."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    # One boolean flag per letter; index by alphabet position.
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
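# Worked example (an illustrative addition): "Waltz, bad nymph, for quick jigs
# vex" is another well-known pangram, and all three predicates agree on it.
assert is_pangram("Waltz, bad nymph, for quick jigs vex")
assert is_pangram_faster("Waltz, bad nymph, for quick jigs vex")
assert is_pangram_fastest("Waltz, bad nymph, for quick jigs vex")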
def multiplicative_persistence(num: int) -> int:
    """
    Return the number of times `num` must be replaced by the product of its
    digits until it reaches a single digit.
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return the number of times `num` must be replaced by the sum of its
    digits until it reaches a single digit.
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
    import doctest

    doctest.testmod()
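# Worked examples (illustrative additions; the digit chains are verified by hand):
assert additive_persistence(199) == 3  # 199 -> 19 -> 10 -> 1
assert multiplicative_persistence(39) == 3  # 39 -> 27 -> 14 -> 4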
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """
    Visual Question Answering pipeline using a `AutoModelForVisualQuestionAnswering`. This pipeline is currently
    only available in PyTorch.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # also supports {"image": ..., "question": ...} dicts, lists of such
            # dicts, generators and datasets passed as the first argument
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
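# A hedged usage sketch via the high-level pipeline factory (the checkpoint name
# is a real VQA model but any compatible one works; the image path is a
# placeholder):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="cats.jpg", question="How many cats are there?")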