code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = Dict[str, Any]
UpperCamelCase = List[Prediction]
@add_end_docstrings(lowercase )
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :str , *lowerCamelCase__ :Optional[Any] , **lowerCamelCase__ :Union[str, Any] ):
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def __a ( self :List[str] , **lowerCamelCase__ :Any ):
UpperCamelCase__ :List[Any] = {}
if "threshold" in kwargs:
UpperCamelCase__ :int = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self :List[str] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Any ):
return super().__call__(*lowerCamelCase__ , **lowerCamelCase__ )
def __a ( self :str , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :str = load_image(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = torch.IntTensor([[image.height, image.width]] )
UpperCamelCase__ :Any = self.image_processor(images=[image] , return_tensors="""pt""" )
if self.tokenizer is not None:
UpperCamelCase__ :List[str] = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
UpperCamelCase__ :Dict = target_size
return inputs
def __a ( self :int , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :Union[str, Any] = model_inputs.pop("""target_size""" )
UpperCamelCase__ :Optional[Any] = self.model(**lowerCamelCase__ )
UpperCamelCase__ :int = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
UpperCamelCase__ :List[str] = model_inputs["""bbox"""]
return model_outputs
def __a ( self :str , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any]=0.9 ):
UpperCamelCase__ :Optional[Any] = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
UpperCamelCase__ :Tuple = target_size[0].tolist()
def unnormalize(lowerCamelCase__ :str ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
] ) )
UpperCamelCase__ :Union[str, Any] = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
UpperCamelCase__ :Union[str, Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
UpperCamelCase__ :Any = [unnormalize(lowerCamelCase__ ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
UpperCamelCase__ :int = ["""score""", """label""", """box"""]
UpperCamelCase__ :int = [dict(zip(lowerCamelCase__ , lowerCamelCase__ ) ) for vals in zip(scores.tolist() , lowerCamelCase__ , lowerCamelCase__ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
UpperCamelCase__ :Tuple = self.image_processor.post_process_object_detection(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = raw_annotations[0]
UpperCamelCase__ :Optional[Any] = raw_annotation["""scores"""]
UpperCamelCase__ :Tuple = raw_annotation["""labels"""]
UpperCamelCase__ :List[Any] = raw_annotation["""boxes"""]
UpperCamelCase__ :str = scores.tolist()
UpperCamelCase__ :str = [self.model.config.idalabel[label.item()] for label in labels]
UpperCamelCase__ :str = [self._get_bounding_box(lowerCamelCase__ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
UpperCamelCase__ :Optional[int] = ["""score""", """label""", """box"""]
UpperCamelCase__ :Tuple = [
dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
]
return annotation
def __a ( self :Dict , lowerCamelCase__ :List[Any] ):
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
UpperCamelCase__ :Union[str, Any] = box.int().tolist()
UpperCamelCase__ :Optional[Any] = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox | 45 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> str:
lowercase : Dict = SwinvaConfig()
lowercase : Union[str, Any] = swinva_name.split("""_""" )
lowercase : Dict = name_split[1]
if "to" in name_split[3]:
lowercase : Any = int(name_split[3][-3:] )
else:
lowercase : Optional[int] = int(name_split[3] )
if "to" in name_split[2]:
lowercase : List[str] = int(name_split[2][-2:] )
else:
lowercase : Optional[Any] = int(name_split[2][6:] )
if model_size == "tiny":
lowercase : Optional[int] = 96
lowercase : Union[str, Any] = (2, 2, 6, 2)
lowercase : List[str] = (3, 6, 12, 24)
elif model_size == "small":
lowercase : Any = 96
lowercase : List[Any] = (2, 2, 18, 2)
lowercase : Tuple = (3, 6, 12, 24)
elif model_size == "base":
lowercase : Dict = 128
lowercase : Dict = (2, 2, 18, 2)
lowercase : int = (4, 8, 16, 32)
else:
lowercase : Optional[int] = 192
lowercase : Optional[Any] = (2, 2, 18, 2)
lowercase : List[Any] = (6, 12, 24, 48)
if "to" in swinva_name:
lowercase : Tuple = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
lowercase : Any = 21_841
lowercase : str = """huggingface/label-files"""
lowercase : Dict = """imagenet-22k-id2label.json"""
lowercase : List[str] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
lowercase : List[Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
lowercase : str = idalabel
lowercase : Union[str, Any] = {v: k for k, v in idalabel.items()}
else:
lowercase : int = 1_000
lowercase : List[Any] = """huggingface/label-files"""
lowercase : Tuple = """imagenet-1k-id2label.json"""
lowercase : Tuple = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
lowercase : Union[str, Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
lowercase : Optional[Any] = idalabel
lowercase : str = {v: k for k, v in idalabel.items()}
lowercase : Optional[Any] = img_size
lowercase : Optional[int] = num_classes
lowercase : Any = embed_dim
lowercase : List[str] = depths
lowercase : Union[str, Any] = num_heads
lowercase : List[Any] = window_size
return config
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[Any]:
if "patch_embed.proj" in name:
lowercase : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase : Union[str, Any] = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
lowercase : Optional[int] = """encoder.""" + name
if "attn.proj" in name:
lowercase : Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowercase : str = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase : Optional[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase : Optional[int] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase : Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
lowercase : Optional[int] = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
lowercase : Any = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
lowercase : str = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
lowercase : Union[str, Any] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if name == "norm.weight":
lowercase : Tuple = """layernorm.weight"""
if name == "norm.bias":
lowercase : Any = """layernorm.bias"""
if "head" in name:
lowercase : int = name.replace("""head""" , """classifier""" )
else:
lowercase : int = """swinv2.""" + name
return name
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
for key in orig_state_dict.copy().keys():
lowercase : Tuple = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "mask" in key:
continue
elif "qkv" in key:
lowercase : Union[str, Any] = key.split(""".""" )
lowercase : List[str] = int(key_split[1] )
lowercase : str = int(key_split[3] )
lowercase : Any = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase : Tuple = val[:dim, :]
lowercase : int = val[dim : dim * 2, :]
lowercase : List[str] = val[-dim:, :]
else:
lowercase : List[str] = val[:dim]
lowercase : str = val[
dim : dim * 2
]
lowercase : str = val[-dim:]
else:
lowercase : Any = val
return orig_state_dict
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
lowercase : Optional[Any] = timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ )
timm_model.eval()
lowercase : List[Any] = get_swinva_config(SCREAMING_SNAKE_CASE__ )
lowercase : Any = SwinvaForImageClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
lowercase : Optional[int] = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase : List[str] = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) )
lowercase : Any = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
lowercase : Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
lowercase : Tuple = timm_model(inputs["""pixel_values"""] )
lowercase : int = model(**SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 )
print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="""nandwalritik""" , commit_message="""Add model""" , )
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase : Dict = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 336 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class UpperCAmelCase__ :
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""Translation""" ,init=__UpperCamelCase ,repr=__UpperCamelCase )
def __call__( self : Any ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def snake_case__ ( self : Dict ):
'''simple docstring'''
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class UpperCAmelCase__ :
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""TranslationVariableLanguages""" ,init=__UpperCamelCase ,repr=__UpperCamelCase )
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = sorted(set(self.languages ) ) if self.languages else None
__UpperCAmelCase : Optional[int] = len(self.languages ) if self.languages else None
def __call__( self : Union[str, Any] ):
'''simple docstring'''
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def snake_case__ ( self : Tuple , a_ : int ):
'''simple docstring'''
__UpperCAmelCase : List[str] = set(self.languages )
if self.languages and set(a_ ) - lang_set:
raise ValueError(
F'Some languages in example ({", ".join(sorted(set(a_ ) - lang_set ) )}) are not in valid set ({", ".join(a_ )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__UpperCAmelCase : Optional[Any] = []
for lang, text in translation_dict.items():
if isinstance(a_ , a_ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__UpperCAmelCase , __UpperCAmelCase : str = zip(*sorted(a_ ) )
return {"language": languages, "translation": translations}
def snake_case__ ( self : int ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 241 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : Dict , a_ : Optional[int] , a_ : List[str]=2 , a_ : Optional[Any]=3 , a_ : str=4 , a_ : Optional[Any]=2 , a_ : List[Any]=7 , a_ : int=True , a_ : Optional[int]=True , a_ : List[Any]=True , a_ : Any=True , a_ : List[Any]=99 , a_ : Dict=36 , a_ : Any=2 , a_ : Any=4 , a_ : List[str]=37 , a_ : int="gelu" , a_ : str=0.1 , a_ : Tuple=0.1 , a_ : Any=5_12 , a_ : int=16 , a_ : List[str]=2 , a_ : Optional[int]=0.0_2 , a_ : Dict=6 , a_ : List[Any]=6 , a_ : Union[str, Any]=3 , a_ : Dict=4 , a_ : Union[str, Any]=None , a_ : Any=10_00 , ):
'''simple docstring'''
__UpperCAmelCase : str = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : Union[str, Any] = num_channels
__UpperCAmelCase : Dict = image_size
__UpperCAmelCase : Dict = patch_size
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_input_mask
__UpperCAmelCase : int = use_token_type_ids
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : int = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : Tuple = coordinate_size
__UpperCAmelCase : Any = shape_size
__UpperCAmelCase : Tuple = num_labels
__UpperCAmelCase : Tuple = num_choices
__UpperCAmelCase : Union[str, Any] = scope
__UpperCAmelCase : int = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__UpperCAmelCase : Tuple = text_seq_length
__UpperCAmelCase : int = (image_size // patch_size) ** 2 + 1
__UpperCAmelCase : Optional[Any] = self.text_seq_length + self.image_seq_length
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__UpperCAmelCase : Tuple = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__UpperCAmelCase : Any = bbox[i, j, 3]
__UpperCAmelCase : int = bbox[i, j, 1]
__UpperCAmelCase : Union[str, Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__UpperCAmelCase : List[Any] = bbox[i, j, 2]
__UpperCAmelCase : List[str] = bbox[i, j, 0]
__UpperCAmelCase : List[str] = tmp_coordinate
__UpperCAmelCase : Any = tf.constant(a_ )
__UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Optional[int] = None
if self.use_input_mask:
__UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__UpperCAmelCase : Dict = None
if self.use_token_type_ids:
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : List[Any] = None
if self.use_labels:
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__UpperCAmelCase : Union[str, Any] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self : str , a_ : List[Any] , a_ : int , a_ : Any , a_ : Tuple , a_ : str , a_ : int ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = TFLayoutLMvaModel(config=a_ )
# text + image
__UpperCAmelCase : Optional[Any] = model(a_ , pixel_values=a_ , training=a_ )
__UpperCAmelCase : Optional[Any] = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , training=a_ , )
__UpperCAmelCase : Optional[Any] = model(a_ , bbox=a_ , pixel_values=a_ , training=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__UpperCAmelCase : Union[str, Any] = model(a_ , training=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__UpperCAmelCase : Tuple = model({'''pixel_values''': pixel_values} , training=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def snake_case__ ( self : str , a_ : Dict , a_ : int , a_ : str , a_ : Optional[Any] , a_ : Tuple , a_ : Union[str, Any] , a_ : List[str] ):
'''simple docstring'''
__UpperCAmelCase : str = self.num_labels
__UpperCAmelCase : List[str] = TFLayoutLMvaForSequenceClassification(config=a_ )
__UpperCAmelCase : List[Any] = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , training=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , a_ : Optional[Any] , a_ : List[Any] , a_ : Tuple , a_ : List[Any] , a_ : Optional[Any] , a_ : Optional[int] , a_ : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = self.num_labels
__UpperCAmelCase : Dict = TFLayoutLMvaForTokenClassification(config=a_ )
__UpperCAmelCase : List[Any] = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , training=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def snake_case__ ( self : Dict , a_ : Tuple , a_ : Optional[int] , a_ : List[str] , a_ : List[str] , a_ : List[Any] , a_ : Optional[int] , a_ : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Dict = 2
__UpperCAmelCase : List[str] = TFLayoutLMvaForQuestionAnswering(config=a_ )
__UpperCAmelCase : Tuple = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , training=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) : List[str] = config_and_inputs
__UpperCAmelCase : List[str] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def snake_case__ ( self : List[Any] , a_ : Tuple , a_ : Tuple , a_ : Optional[int] , a_ : Optional[int] , a_ : Optional[int] ):
'''simple docstring'''
return True
def snake_case__ ( self : Optional[Any] , a_ : Any , a_ : Union[str, Any] , a_ : Dict=False ):
'''simple docstring'''
__UpperCAmelCase : str = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
__UpperCAmelCase : Dict = {
k: tf.tile(tf.expand_dims(a_ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(a_ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
__UpperCAmelCase : List[Any] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(a_ ):
__UpperCAmelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__UpperCAmelCase : Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(a_ ):
__UpperCAmelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(a_ ):
__UpperCAmelCase : str = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = TFLayoutLMvaModelTester(self )
__UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = model_class(a_ )
if getattr(a_ , '''hf_compute_loss''' , a_ ):
# The number of elements in the loss should be the same as the number of elements in the label
__UpperCAmelCase : List[str] = self._prepare_for_class(inputs_dict.copy() , a_ , return_labels=a_ )
__UpperCAmelCase : Optional[int] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a_ )[0]
]
__UpperCAmelCase : int = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__UpperCAmelCase : List[str] = self._prepare_for_class(inputs_dict.copy() , a_ , return_labels=a_ )
__UpperCAmelCase : List[Any] = prepared_for_class.pop('''input_ids''' )
__UpperCAmelCase : Dict = model(a_ , **a_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__UpperCAmelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , a_ , return_labels=a_ )
__UpperCAmelCase : int = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
__UpperCAmelCase : int = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__UpperCAmelCase : int = -1_00
__UpperCAmelCase : Any = tf.convert_to_tensor(a_ )
__UpperCAmelCase : Optional[int] = model(a_ , **a_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__UpperCAmelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , a_ , return_labels=a_ )
__UpperCAmelCase : Any = model(a_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__UpperCAmelCase : Dict = self._prepare_for_class(inputs_dict.copy() , a_ , return_labels=a_ )
# Get keys that were added with the _prepare_for_class function
__UpperCAmelCase : Union[str, Any] = prepared_for_class.keys() - inputs_dict.keys()
__UpperCAmelCase : Dict = inspect.signature(model.call ).parameters
__UpperCAmelCase : Any = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__UpperCAmelCase : List[str] = {0: '''input_ids'''}
for label_key in label_keys:
__UpperCAmelCase : str = signature_names.index(a_ )
__UpperCAmelCase : str = label_key
__UpperCAmelCase : List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__UpperCAmelCase : Optional[int] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__UpperCAmelCase : str = prepared_for_class[value]
__UpperCAmelCase : Union[str, Any] = tuple(a_ )
# Send to model
__UpperCAmelCase : int = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def snake_case__ ( self : List[str] ):
'''simple docstring'''
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(a_ , a_ , a_ , a_ , a_ , a_ )
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : List[Any] = type
self.model_tester.create_and_check_model(a_ , a_ , a_ , a_ , a_ , a_ )
def snake_case__ ( self : Dict ):
'''simple docstring'''
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
a_ , a_ , a_ , a_ , a_ , a_ , a_ )
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
a_ , a_ , a_ , a_ , a_ , a_ , a_ )
def snake_case__ ( self : List[str] ):
'''simple docstring'''
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
a_ , a_ , a_ , a_ , a_ , a_ , a_ )
@slow
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : int = TFLayoutLMvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def a ( ):
'''simple docstring'''
__UpperCAmelCase : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self : Dict ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=a_ ) if is_vision_available() else None
@slow
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
__UpperCAmelCase : Any = self.default_image_processor
__UpperCAmelCase : Dict = prepare_img()
__UpperCAmelCase : List[str] = image_processor(images=a_ , return_tensors='''tf''' ).pixel_values
__UpperCAmelCase : List[Any] = tf.constant([[1, 2]] )
__UpperCAmelCase : List[str] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__UpperCAmelCase : List[str] = model(input_ids=a_ , bbox=a_ , pixel_values=a_ , training=a_ )
# verify the logits
__UpperCAmelCase : List[Any] = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , a_ )
__UpperCAmelCase : Any = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
| 241 | 1 |
"""simple docstring"""
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
A_ : Tuple = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def __UpperCamelCase ( ):
    """Demo entry point: print the series sum for first_term=1, diff=1, 10 terms.

    NOTE(review): ``sum_of_series`` is not defined in this file as written — the
    summation helper above carries an obfuscated name; confirm before running.
    """
    print(sum_of_series(1 , 1 , 10 ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# --- non-code residue (chunk-separator table fragment) ---
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
    """Configuration class for the Informer time-series transformer.

    BUG FIX: the obfuscated ``__init__`` reused one parameter name for every
    argument (a SyntaxError), both class attributes were named ``_A`` (the
    second shadowed the first), and the helper property carried a name the body
    never referenced.  Parameter names are restored positionally-compatibly
    with the original defaults, ``model_type``/``attribute_map`` are restored
    (the names the config base class reads), and the property is renamed
    ``_number_of_features`` to match its use in ``__init__``.
    """

    model_type = """informer"""
    # Map generic transformer attribute names onto this config's fields.
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__(
        self,
        prediction_length = None,
        context_length = None,
        distribution_output = "student_t",
        loss = "nll",
        input_size = 1,
        lags_sequence = None,
        scaling = "mean",
        num_dynamic_real_features = 0,
        num_static_categorical_features = 0,
        num_static_real_features = 0,
        num_time_features = 0,
        cardinality = None,
        embedding_dimension = None,
        d_model = 64,
        encoder_ffn_dim = 32,
        decoder_ffn_dim = 32,
        encoder_attention_heads = 2,
        decoder_attention_heads = 2,
        encoder_layers = 2,
        decoder_layers = 2,
        is_encoder_decoder = True,
        activation_function = "gelu",
        dropout = 0.05,
        encoder_layerdrop = 0.1,
        decoder_layerdrop = 0.1,
        attention_dropout = 0.1,
        activation_dropout = 0.1,
        num_parallel_samples = 100,
        init_std = 0.02,
        use_cache=True,
        attention_type = "prob",
        sampling_factor = 5,
        distil = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        # Default lags: the previous week of observations.
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic embedding width, capped at 50 per categorical feature.
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer-specific settings
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def _number_of_features(self ):
        """Width of the per-time-step feature vector fed to the model."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
# --- non-code residue (chunk-separator table fragment) ---
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> Union[str, Any]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> Union[str, Any]:
a__ : int = 0
a__ : Union[str, Any] = len(__UpperCamelCase ) # No of vertices in graph
a__ : List[str] = [0] * n
a__ : Union[str, Any] = [False] * n
def dfs(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
a__ : Union[str, Any] = True
a__ : Any = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , id_ )
a__ : Any = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
a__ : List[Any] = min(low[at] , low[to] )
a__ : List[str] = []
for i in range(__UpperCamelCase ):
if not visited[i]:
dfs(__UpperCamelCase , -1 , __UpperCamelCase , id_ )
return bridges
if __name__ == "__main__":
    import doctest

    # Run any doctest examples embedded in the functions above.
    doctest.testmod()
# --- non-code residue (chunk-separator table fragment) ---
from __future__ import annotations
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> list:
    """Bucket sort: distribute values into unit-width buckets, sort each, concatenate.

    BUG FIX: the obfuscated version dropped the min/max bindings and then
    referenced the undefined names ``min_value``/``max_value``, and appended the
    whole input list into each bucket instead of the element; both are restored.

    :param __UpperCamelCase: list of comparable numbers (may be empty)
    :return: a new sorted list
    """
    if len(__UpperCamelCase ) == 0:
        return []
    min_value, max_value = min(__UpperCamelCase ), max(__UpperCamelCase )
    # One bucket per unit of the value range.
    bucket_count = int(max_value - min_value ) + 1
    buckets: list[list] = [[] for _ in range(bucket_count )]
    for i in __UpperCamelCase:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): `bucket_sort` is not defined in this file as written — the
    # sorting helper above carries an obfuscated name; confirm before running.
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
# --- non-code residue (chunk-separator table fragment) ---
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _a ( __lowerCAmelCase ):
    """Output container for a DDIM scheduler step.

    NOTE(review): both fields share one obfuscated name, so only the second
    (optional) field survives as written — presumably ``prev_sample`` and
    ``pred_original_sample`` originally; confirm against diffusers.
    """

    SCREAMING_SNAKE_CASE_ : torch.FloatTensor
    SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None
def __a ( _UpperCamelCase: Dict , _UpperCamelCase: Optional[Any]=0.999 , _UpperCamelCase: Dict="cosine" , ) -> Any:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCamelCase: List[str] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCamelCase: int ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_snake_case = []
for i in range(_UpperCamelCase ):
_snake_case = i / num_diffusion_timesteps
_snake_case = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCamelCase ) / alpha_bar_fn(_UpperCamelCase ) , _UpperCamelCase ) )
return torch.tensor(_UpperCamelCase , dtype=torch.floataa )
class _a ( __lowerCAmelCase , __lowerCAmelCase ):
    """Inverted-DDIM-style scheduler (config-registered diffusers scheduler).

    NOTE(review): this block was machine-obfuscated — every ``__init__``
    parameter shares the name ``_SCREAMING_SNAKE_CASE`` (a SyntaxError), every
    local is rebound to ``_snake_case``, all three public methods share the
    name ``_lowercase``, and ``torch.floataa``/``np.intaa`` are mangled dtypes
    (presumably float32/int64).  It is documented as-is, not repaired.
    """

    # NOTE(review): presumably the scheduler's solver `order` — confirm.
    SCREAMING_SNAKE_CASE_ : int = 1

    @register_to_config
    def __init__( self ,_SCREAMING_SNAKE_CASE = 1_000 ,_SCREAMING_SNAKE_CASE = 0.0_0_0_1 ,_SCREAMING_SNAKE_CASE = 0.0_2 ,_SCREAMING_SNAKE_CASE = "linear" ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = 0 ,_SCREAMING_SNAKE_CASE = "epsilon" ,_SCREAMING_SNAKE_CASE = 1.0 ,**_SCREAMING_SNAKE_CASE ,) -> Dict:
        # Map the deprecated `set_alpha_to_one` kwarg onto `set_alpha_to_zero`.
        if kwargs.get("set_alpha_to_one" ,_SCREAMING_SNAKE_CASE ) is not None:
            _snake_case = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one" ,"1.0.0" ,_SCREAMING_SNAKE_CASE ,standard_warn=_SCREAMING_SNAKE_CASE )
            _snake_case = kwargs["set_alpha_to_one"]
        # Build the beta schedule: explicit table, linear, scaled-linear or cosine.
        if trained_betas is not None:
            _snake_case = torch.tensor(_SCREAMING_SNAKE_CASE ,dtype=torch.floataa )
        elif beta_schedule == "linear":
            _snake_case = torch.linspace(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            _snake_case = (
                torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_SCREAMING_SNAKE_CASE ,dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            _snake_case = betas_for_alpha_bar(_SCREAMING_SNAKE_CASE )
        else:
            raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
        _snake_case = 1.0 - self.betas
        _snake_case = torch.cumprod(self.alphas ,dim=0 )
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        _snake_case = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        _snake_case = 1.0
        # setable values
        _snake_case = None
        _snake_case = torch.from_numpy(np.arange(0 ,_SCREAMING_SNAKE_CASE ).copy().astype(np.intaa ) )

    def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> torch.FloatTensor:
        # Identity scaling: this scheduler does not rescale model inputs.
        return sample

    def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> Dict:
        """Discretize the training diffusion chain into inference timesteps."""
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"""
                f""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"""
                f""" maximal {self.config.num_train_timesteps} timesteps.""" )
        _snake_case = num_inference_steps
        _snake_case = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        _snake_case = (np.arange(0 ,_SCREAMING_SNAKE_CASE ) * step_ratio).round().copy().astype(np.intaa )
        _snake_case = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
        self.timesteps += self.config.steps_offset

    def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 0.0 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = True ,) -> Union[DDIMSchedulerOutput, Tuple]:
        """One inverted-DDIM update (formula 12 of https://arxiv.org/pdf/2010.02502.pdf)."""
        # 1. get previous step value (=t+1)
        _snake_case = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        _snake_case = self.alphas_cumprod[timestep]
        _snake_case = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        _snake_case = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            _snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            _snake_case = model_output
        elif self.config.prediction_type == "sample":
            _snake_case = model_output
            _snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            _snake_case = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            _snake_case = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"""
                " `v_prediction`" )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            _snake_case = pred_original_sample.clamp(
                -self.config.clip_sample_range ,self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        _snake_case = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        _snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE ,pred_original_sample=_SCREAMING_SNAKE_CASE )

    def __len__( self ) -> Dict:
        # The scheduler's length is the number of training timesteps.
        return self.config.num_train_timesteps
# --- non-code residue (chunk-separator table fragment) ---
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
UpperCamelCase_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
UpperCamelCase_ : Optional[int] = object()
def __a ( _UpperCamelCase: Union[str, Any] , _UpperCamelCase: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_snake_case = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(_UpperCamelCase ) - len(_UpperCamelCase ) + 1 ):
_snake_case = [x.match(_UpperCamelCase ) for x, y in zip(_UpperCamelCase , ks[i:] )]
if matches and all(_UpperCamelCase ):
return True
return False
def __a ( rules ) -> Union[str, Any]:
    """Build a ``replace(key, default)`` closure from (pattern, replacement) rules.

    BUG FIX: the inner closure's two parameters shared one obfuscated name (a
    SyntaxError) and the rule/key arguments to ``_match`` were mangled; the
    conventional names are restored.  The closure returns the first matching
    rule's replacement, or the supplied default value.
    """
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val

    return replace
def __a ( ) -> Any:
    """Partition rules mapping GPT-style parameter paths to PartitionSpecs.

    BUG FIX: the obfuscated version passed the undefined module-level name
    ``_UpperCamelCase`` where the rules use ``None`` (replicated axis); ``None``
    is restored.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp" , None )),
        (("transformer", "wte", "embedding"), P("mp" , None )),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , "mp" )),
        (("attention", "out_proj", "kernel"), P("mp" , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , "mp" )),
        (("mlp", "c_fc", "bias"), P("mp" )),
        (("mlp", "c_proj", "kernel"), P("mp" , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def __a ( in_dict ) -> Any:
    """Map every flattened parameter key to a PartitionSpec via the rule table.

    BUG FIX: the obfuscated version rebound every step to one throwaway local
    and passed the input dict (instead of the rules) to ``_replacement_rules``;
    the intended data flow is restored.  Raises AssertionError if any key is
    left unmatched (the ``_unmatched`` sentinel survives).
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    # Start every flattened key at the sentinel, then apply the rules.
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
# --- non-code residue (chunk-separator table fragment) ---
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCamelCase : List[str] = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
# --- non-code residue (chunk-separator table fragment) ---
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def __UpperCAmelCase ( A : np.ndarray ) -> np.ndarray:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
def __UpperCAmelCase ( A : np.ndarray ) -> np.ndarray:
return (gray > 1_2_7) & (gray <= 2_5_5)
def __UpperCAmelCase ( A : np.ndarray , A : np.ndarray ) -> np.ndarray:
UpperCAmelCase_ : List[Any] = np.zeros_like(A )
UpperCAmelCase_ : Dict = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
UpperCAmelCase_ : Optional[Any] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
UpperCAmelCase_ : List[Any] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
UpperCAmelCase_ : List[Any] = int(summation > 0 )
return output
if __name__ == "__main__":
    # NOTE(review): the helpers above carry obfuscated names, so
    # `rgb_to_gray`/`gray_to_binary`/`dilation` are undefined in this file as
    # written; confirm the intended names before running this demo.
    # read original image
    _UpperCamelCase : str = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    _UpperCamelCase : List[Any] = np.array(Image.open(lena_path))
    # kernel to be applied
    _UpperCamelCase : int = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    _UpperCamelCase : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    _UpperCamelCase : int = Image.fromarray(output).convert('RGB')
    pil_img.save('result_dilation.png')
# --- non-code residue (chunk-separator table fragment) ---
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class lowercase_ ( _SCREAMING_SNAKE_CASE ):
    """Affine-transformed distribution wrapper: y = loc + scale * x.

    NOTE(review): identifiers were obfuscated — ``_snake_case`` in ``__init__``
    is undefined (presumably the base distribution / event_dim arguments), so
    this block does not run as written; documented as-is.
    """

    def __init__( self : Dict, UpperCamelCase__ : Distribution, UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : List[str]=None, UpperCamelCase__ : Optional[Any]=0 ) -> List[Any]:
        # Missing loc/scale default to the identity transform.
        _A = 1.0 if scale is None else scale
        _A = 0.0 if loc is None else loc
        super().__init__(_snake_case, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=_snake_case )] )

    @property
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        # Mean transforms affinely: E[y] = scale * E[x] + loc.
        return self.base_dist.mean * self.scale + self.loc

    @property
    def __UpperCAmelCase ( self : str ) -> Any:
        # Variance scales quadratically with `scale`.
        return self.base_dist.variance * self.scale**2

    @property
    def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        # Standard deviation is the square root of the variance.
        return self.variance.sqrt()
class lowercase_ ( nn.Module ):
    """Project a hidden vector to one tensor per distribution argument.

    NOTE(review): obfuscated identifiers (undefined ``_snake_case``, repeated
    ``_A``) mean this block does not run as written; documented as-is.
    """

    def __init__( self : str, UpperCamelCase__ : int, UpperCamelCase__ : Dict[str, int], UpperCamelCase__ : Callable[..., Tuple[torch.Tensor]], **UpperCamelCase__ : Optional[Any] ) -> None:
        super().__init__(**_snake_case )
        _A = args_dim
        # One linear head per distribution argument (output dims from args_dim values).
        _A = nn.ModuleList([nn.Linear(_snake_case, _snake_case ) for dim in args_dim.values()] )
        _A = domain_map

    def __UpperCAmelCase ( self : Any, UpperCamelCase__ : torch.Tensor ) -> Tuple[torch.Tensor]:
        # Apply every projection head, then map the raw outputs into valid domains.
        _A = [proj(_snake_case ) for proj in self.proj]
        return self.domain_map(*_snake_case )
class lowercase_ ( nn.Module ):
    """Wrap a plain callable as an nn.Module (a 'lambda layer')."""

    def __init__( self : List[Any], UpperCamelCase__ : str ) -> Optional[Any]:
        super().__init__()
        # NOTE(review): `function` is presumably the (obfuscated) parameter above.
        _A = function

    def __UpperCAmelCase ( self : Any, UpperCamelCase__ : Tuple, *UpperCamelCase__ : Optional[int] ) -> Dict:
        # Delegate the forward call to the stored function.
        return self.function(_snake_case, *_snake_case )
class lowercase_ :
    """Base class describing how model outputs parameterize a torch Distribution.

    NOTE(review): identifiers were obfuscated — the three class attributes
    (presumably ``distribution_class``, ``in_support`` and ``args_dim``) were
    collapsed to ``4_2`` placeholders and ``_snake_case`` is undefined
    throughout; documented as-is, not repaired.
    """

    __lowerCAmelCase = 4_2  # placeholder left by obfuscation
    __lowerCAmelCase = 4_2
    __lowerCAmelCase = 4_2

    def __init__( self : List[Any], UpperCamelCase__ : int = 1 ) -> None:
        _A = dim
        # Per-argument projection sizes scale with the event dimension.
        _A = {k: dim * self.args_dim[k] for k in self.args_dim}

    def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : str ) -> str:
        # dim == 1 -> univariate; otherwise treat the last dim as independent events.
        if self.dim == 1:
            return self.distribution_class(*_snake_case )
        else:
            return Independent(self.distribution_class(*_snake_case ), 1 )

    def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[torch.Tensor] = None, UpperCamelCase__ : Optional[torch.Tensor] = None, ) -> Distribution:
        """Build the output distribution, affine-transformed when loc/scale given."""
        _A = self._base_distribution(_snake_case )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(_snake_case, loc=_snake_case, scale=_snake_case, event_dim=self.event_dim )

    @property
    def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        # Event shape: scalar for dim == 1, else a 1-D event of length `dim`.
        return () if self.dim == 1 else (self.dim,)

    @property
    def __UpperCAmelCase ( self : List[str] ) -> int:
        # Number of event dimensions.
        return len(self.event_shape )

    @property
    def __UpperCAmelCase ( self : Optional[int] ) -> float:
        # Value used where targets are padded/invalid.
        return 0.0

    def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : int ) -> nn.Module:
        """Create the projection module mapping features to distribution args."""
        return ParameterProjection(
            in_features=_snake_case, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map ), )

    def __UpperCAmelCase ( self : Union[str, Any], *UpperCamelCase__ : torch.Tensor ) -> int:
        # Subclasses constrain raw projections into each argument's domain.
        raise NotImplementedError()

    @staticmethod
    def __UpperCAmelCase ( UpperCamelCase__ : torch.Tensor ) -> torch.Tensor:
        # Softplus-like positivity map: (x + sqrt(x^2 + 4)) / 2.
        return (x + torch.sqrt(torch.square(_snake_case ) + 4.0 )) / 2.0
class lowercase_ ( _SCREAMING_SNAKE_CASE ):
    """Student-T output head: one value each for df, loc and scale."""

    __lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
    __lowerCAmelCase = StudentT

    @classmethod
    def __UpperCAmelCase ( cls : Tuple, UpperCamelCase__ : torch.Tensor, UpperCamelCase__ : torch.Tensor, UpperCamelCase__ : torch.Tensor ) -> List[str]:
        # scale must be strictly positive; df > 2 so the variance exists.
        # NOTE(review): `_snake_case` is an obfuscation artifact (the raw tensors).
        _A = cls.squareplus(_snake_case ).clamp_min(torch.finfo(scale.dtype ).eps )
        _A = 2.0 + cls.squareplus(_snake_case )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class lowercase_ ( _SCREAMING_SNAKE_CASE ):
    """Normal output head: one value each for loc and scale."""

    __lowerCAmelCase = {"loc": 1, "scale": 1}
    __lowerCAmelCase = Normal

    @classmethod
    def __UpperCAmelCase ( cls : Any, UpperCamelCase__ : torch.Tensor, UpperCamelCase__ : torch.Tensor ) -> Any:
        # Keep scale strictly positive (squareplus + dtype epsilon floor).
        _A = cls.squareplus(_snake_case ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class lowercase_ ( _SCREAMING_SNAKE_CASE ):
    """Negative-binomial output head: one value each for total_count and logits."""

    __lowerCAmelCase = {"total_count": 1, "logits": 1}
    __lowerCAmelCase = NegativeBinomial

    @classmethod
    def __UpperCAmelCase ( cls : int, UpperCamelCase__ : torch.Tensor, UpperCamelCase__ : torch.Tensor ) -> Optional[Any]:
        # total_count must be positive; logits are unconstrained.
        _A = cls.squareplus(_snake_case )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def __UpperCAmelCase ( self : List[str], UpperCamelCase__ : Optional[int] ) -> Distribution:
        # Build the (possibly Independent) base negative-binomial distribution.
        _A , _A = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=_snake_case, logits=_snake_case )
        else:
            return Independent(self.distribution_class(total_count=_snake_case, logits=_snake_case ), 1 )

    def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : List[Any], UpperCamelCase__ : Optional[torch.Tensor] = None, UpperCamelCase__ : Optional[torch.Tensor] = None ) -> Distribution:
        # Scaling is folded into the logits rather than an affine transform.
        _A , _A = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
    """Integration test: TF CamemBERT forward pass against pinned activations.

    NOTE(review): ``_snake_case`` below is an obfuscation artifact (presumably
    the input tensor / expected shape locals), so the test does not run as
    written; documented as-is.
    """

    @slow
    def lowerCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , )  # J'aime le camembert !"
        SCREAMING_SNAKE_CASE__ = model(_snake_case )["last_hidden_state"]
        SCREAMING_SNAKE_CASE__ = tf.TensorShape((1, 10, 768) )
        # Hidden state must be (batch=1, seq_len=10, hidden=768).
        self.assertEqual(output.shape , _snake_case )
        # compare the actual values for a slice.
        SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
# --- non-code residue (chunk-separator table fragment) ---
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a__ : Any = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class a_ ( unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__SCREAMING_SNAKE_CASE : List[str] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__SCREAMING_SNAKE_CASE : List[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__SCREAMING_SNAKE_CASE : Any = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[Any] = ZeroShotClassificationPipeline(
model=_lowerCamelCase , tokenizer=_lowerCamelCase , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
        """Shared pipeline behaviour checks: call signatures, score normalization,
        batching, and error cases.

        NOTE(review): obfuscation left both parameters with one name (a
        SyntaxError) and rebound every result to throwaway locals while the
        assertions read ``outputs``/``classifier``; documented as-is, not
        repaired.
        """
        SCREAMING_SNAKE_CASE : List[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
        self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )
        # No kwarg
        SCREAMING_SNAKE_CASE : Tuple = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
        self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )
        SCREAMING_SNAKE_CASE : Dict = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
        self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )
        # Comma-separated labels must be split and the scores must sum to 1.
        SCREAMING_SNAKE_CASE : Any = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
        self.assertEqual(
            _lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
        SCREAMING_SNAKE_CASE : str = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
        self.assertEqual(
            _lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
        SCREAMING_SNAKE_CASE : Union[str, Any] = classifier(
            '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
        self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )
        # https://github.com/huggingface/transformers/issues/13846
        SCREAMING_SNAKE_CASE : Tuple = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            _lowerCamelCase , [
                {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]}
                for i in range(1 )
            ] , )
        SCREAMING_SNAKE_CASE : Any = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            _lowerCamelCase , [
                {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]}
                for i in range(2 )
            ] , )
        # Invalid inputs must raise: empty sequence/labels, wrong template, etc.
        with self.assertRaises(_lowerCamelCase ):
            classifier('''''' , candidate_labels='''politics''' )
        with self.assertRaises(_lowerCamelCase ):
            classifier(_lowerCamelCase , candidate_labels='''politics''' )
        with self.assertRaises(_lowerCamelCase ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
        with self.assertRaises(_lowerCamelCase ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels=_lowerCamelCase )
        with self.assertRaises(_lowerCamelCase ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
        with self.assertRaises(_lowerCamelCase ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_lowerCamelCase , )
        self.run_entailment_id(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Dict = zero_shot_classifier.model.config
SCREAMING_SNAKE_CASE : List[str] = config.labelaid
SCREAMING_SNAKE_CASE : Optional[Any] = zero_shot_classifier.entailment_id
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
SCREAMING_SNAKE_CASE : Optional[Any] = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
SCREAMING_SNAKE_CASE : Any = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
SCREAMING_SNAKE_CASE : List[Any] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
SCREAMING_SNAKE_CASE : str = original_labelaid
self.assertEqual(_lowerCamelCase , zero_shot_classifier.entailment_id )
@require_torch
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[str] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Optional[Any] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
SCREAMING_SNAKE_CASE : Tuple = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Dict = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
SCREAMING_SNAKE_CASE : Optional[int] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[str] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
SCREAMING_SNAKE_CASE : Any = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
SCREAMING_SNAKE_CASE : int = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_lowerCamelCase , )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[str] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
SCREAMING_SNAKE_CASE : int = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
SCREAMING_SNAKE_CASE : int = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_lowerCamelCase , )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
| 716 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Emit INFO-level progress messages while converting the checkpoint.
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an official XLM checkpoint into a 🤗 Transformers dump.

    Writes three files into ``pytorch_dump_folder_path``: the model weights,
    the JSON config and the JSON vocabulary.

    Fixes vs. the previous version: the function was defined with two
    identical parameter names (a SyntaxError) and under a name that did not
    match its caller; the vocab message printed the config path.
    """
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Drop tensor/array entries — only JSON-serializable params belong in the config.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # Re-create fastBPE conventions: word-final marker "</w>" for plain tokens
    # (special tokens are the first 14 ids), strip "@@" continuation markers.
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    # Bug fix: this message previously printed the *config* path.
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    # Fix: parser/args were bound to throwaway names while the code below
    # referenced `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 333 | 0 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True iff ``number`` (a non-negative int) is prime, by trial
    division up to sqrt(number).

    Fix: restored the name used by the callers in this module (the function
    had been renamed to a duplicate placeholder).
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def sieve_er(n: int) -> list[int]:
    """Return all primes from 2 up to ``n`` via the sieve of Eratosthenes.

    Fix: the assignment zeroing composites had lost its subscript target
    (``begin_list[j] = 0``); the parameter annotation said List[str] for an int.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes: zero out every multiple of a surviving value
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def get_prime_numbers(n: int) -> list[int]:
    """Return all primes from 2 up to ``n`` by testing each candidate with
    ``is_prime`` (name restored to match its caller in ``goldbach``)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def prime_factorization(number: int) -> list[int]:
    """Return the prime factorization of ``number`` as a list of factors
    (with multiplicity); [0] or [1] for those inputs.

    Fix: restored the name used by callers and the appended value — the loop
    appends the current trial ``factor``, not the original number.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number`` (delegates to
    ``prime_factorization``)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number`` (delegates to
    ``prime_factorization``)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def is_even(number: int) -> bool:
    """Return True iff ``number`` is even (name restored to match its caller
    in ``goldbach``)."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list[int]:
    """Return a pair of primes summing to the even ``number`` > 2
    (Goldbach's conjecture, exhaustive search over primes <= number)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor via the iterative Euclidean algorithm.

    Fix: the obfuscated version had two identical parameter names (a
    SyntaxError) and referenced an undefined ``numbera`` throughout; the
    name is restored to match its caller in ``simplify_fraction``.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def kg_v(number1: int, number2: int) -> int:
    """Least common multiple ("kgV" = kleinstes gemeinsames Vielfaches),
    computed by merging the prime factorizations with max multiplicity.

    Fix: duplicate parameter names and undefined ``numbera`` references
    reconstructed into a working two-parameter implementation.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                # shared factor: take the larger multiplicity
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime number, 0-indexed (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list[int]:
    """Return all primes strictly between the two given primes.

    Fix: duplicate parameter names reconstructed into two distinct
    parameters, as the asserts and loop logic require.
    """
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list[int]:
    """Return all positive divisors of ``n`` in ascending order (name restored
    to match its caller in ``is_perfect_number``)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def is_perfect_number(number: int) -> bool:
    """Return True iff ``number`` equals the sum of its proper divisors
    (e.g. 6 = 1 + 2 + 3)."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    """Reduce ``numerator/denominator`` by their greatest common divisor and
    return the reduced (numerator, denominator) tuple."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Return n! computed iteratively (factorial(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n: int) -> int:
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1.

    Fix: the three working variables all had the same placeholder name;
    reconstructed into the classic rolling-pair iteration.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
'''simple docstring'''
def valid_coloring(
    neighbours: list[int], colored_vertices: list[int], color: int
) -> bool:
    """Return True iff no already-colored neighbour (adjacency value 1) of the
    current vertex uses ``color`` (name restored to match its caller)."""
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )
def util_color(
    graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
    """Recursive backtracking helper: try to color vertices from ``index``
    onwards; mutates ``colored_vertices`` in place.

    Fix: the color/backtrack assignments had lost their subscript targets
    (``colored_vertices[index] = ...``).
    """
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid graph coloring (one color index per vertex) using at
    most ``max_colors`` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
| 489 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding: `_import_structure` maps submodule name -> exported
# names; availability-gated entries are added only when their backend exists.
# Fix: the dict and list assignments had been renamed to throwaway variables
# while `_import_structure` was still referenced below, and the final line
# must install the lazy module into sys.modules.
_import_structure = {
    'configuration_efficientnet': [
        'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EfficientNetConfig',
        'EfficientNetOnnxConfig',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_efficientnet'] = [
        'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientNetForImageClassification',
        'EfficientNetModel',
        'EfficientNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__( ProcessorMixin ):
    r"""
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.

    The image processor resizes document images (and optionally runs OCR to get ``words``/``boxes``); the tokenizer
    turns words and boxes into token-level ``input_ids``, ``attention_mask``, ``token_type_ids`` and ``bbox``.

    Fixes vs. the previous version: the base class was an undefined name (the ``ProcessorMixin`` import was unused),
    the three ProcessorMixin class attributes had been renamed to a duplicated placeholder, ``feature_extractor``
    could be read before assignment, and the ``encoded_inputs['image']`` assignment had lost its target.
    """

    # ProcessorMixin wiring: which sub-processors exist and which classes back them.
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Accept the deprecated `feature_extractor` kwarg as an alias for `image_processor`."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(image_processor , tokenizer )

    def __call__(
        self ,
        images ,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None ,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None ,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None ,
        add_special_tokens: bool = True ,
        padding: Union[bool, str, PaddingStrategy] = False ,
        truncation: Union[bool, str, TruncationStrategy] = None ,
        max_length: Optional[int] = None ,
        stride: int = 0 ,
        pad_to_multiple_of: Optional[int] = None ,
        return_token_type_ids: Optional[bool] = None ,
        return_attention_mask: Optional[bool] = None ,
        return_overflowing_tokens: bool = False ,
        return_special_tokens_mask: bool = False ,
        return_offsets_mapping: bool = False ,
        return_length: bool = False ,
        verbose: bool = True ,
        return_tensors: Optional[Union[str, TensorType]] = None ,
        **kwargs ,
    ) -> BatchEncoding:
        """Run the image processor then the tokenizer and return one BatchEncoding
        that also carries the pixel values under the ``image`` key."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes '''
                '''if you initialized the image processor with apply_ocr set to True.''' )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )

        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''] ,
            text_pair=text_pair if text_pair is not None else None ,
            boxes=boxes if boxes is not None else features['''boxes'''] ,
            word_labels=word_labels ,
            add_special_tokens=add_special_tokens ,
            padding=padding ,
            truncation=truncation ,
            max_length=max_length ,
            stride=stride ,
            pad_to_multiple_of=pad_to_multiple_of ,
            return_token_type_ids=return_token_type_ids ,
            return_attention_mask=return_attention_mask ,
            return_overflowing_tokens=return_overflowing_tokens ,
            return_special_tokens_mask=return_special_tokens_mask ,
            return_offsets_mapping=return_offsets_mapping ,
            return_length=return_length ,
            verbose=verbose ,
            return_tensors=return_tensors ,
            **kwargs ,
        )

        # add pixel values
        images = features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['''overflow_to_sample_mapping'''] )
        encoded_inputs['''image'''] = images

        return encoded_inputs

    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        """Duplicate images so that every overflowing `input_ids` sample keeps its source image."""
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )

        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )

        return images_with_overflow

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Names of the model inputs this processor produces."""
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class( self ):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
def solution(limit: int = 50000000) -> int:
    """Project Euler 87: count numbers below ``limit`` expressible as
    (prime)^2 + (prime)^3 + (prime)^4.

    Fixes: the function name now matches its caller in the __main__ guard,
    and primes are iterated in ascending order — the early ``break``s are
    only valid on a sorted sequence, not on raw ``set`` iteration order.
    """
    ret = set()
    # Largest prime whose square can appear: square <= limit - 2^3 - 2^4 = limit - 24.
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # Ascending order so that once a partial sum exceeds the bound we can break.
    sorted_primes = sorted(primes)

    for prime_1 in sorted_primes:
        square = prime_1 * prime_1
        for prime_2 in sorted_primes:
            cube = prime_2 * prime_2 * prime_2
            # 16 = 2^4 is the smallest possible fourth-power term.
            if square + cube >= limit - 16:
                break
            for prime_3 in sorted_primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
    # The "=" f-string specifier prints both the call text and its value.
    print(F"""{solution() = }""")
| 70 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.g4dn.xlarge""",
            """results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
        },
        {
            """framework""": """tensorflow""",
            """script""": """run_tf.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.g4dn.xlarge""",
            """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
        },
    ] )
class __A ( unittest.TestCase ):
    """Single-node SageMaker training smoke test, parameterized over PT/TF GLUE scripts.

    Fixes vs. the previous version: several keyword arguments referenced an
    undefined name (`check`, `debugger_hook_config`, the `json.dump` file
    handle), and the pipeline results were bound to names never read.
    """

    def setUp(self):
        """Copy the PT training script into the SageMaker test dir and check the fixture."""
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=True , )
        assert hasattr(self , '''env''')

    def create_estimator(self, instance_count=1):
        """Build a HuggingFace estimator for this parameterized framework/script."""
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics next to the test artifacts."""
        TrainingJobAnalytics(job_name).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")

    def test_glue(self):
        """Run one training job and check runtime/accuracy/loss KPIs."""
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('''TrainingTimeInSeconds''' , 99_9999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy)
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , '''w''') as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile)
| 114 | 0 |
'''simple docstring'''
from __future__ import annotations
class Node:
    """Singly linked list node: holds ``data`` and a ``next`` pointer.

    Fix: renamed to ``Node``, the name the functions below instantiate and
    isinstance-check against.
    """

    def __init__(self, data=None):
        self.data = data
        self.next = None  # next node in the chain; None marks the tail

    def __repr__(self):
        """Render the chain starting at this node as 'a->b->c'."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'{temp.data}')
            temp = temp.next
        return "->".join(string_rep)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE ):
    """Build a linked list from a non-empty sequence and return its head node."""
    if not SCREAMING_SNAKE_CASE:
        raise Exception('The Elements List is empty' )
    # The mangled original never bound `head`/`current` (it assigned to a
    # throwaway name) and then returned the undefined `head`.
    # Node class is named `__lowerCAmelCase` in this mangled module.
    head = __lowerCAmelCase(SCREAMING_SNAKE_CASE[0] )
    current = head
    for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
        current.next = __lowerCAmelCase(SCREAMING_SNAKE_CASE[i] )
        current = current.next
    return head
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE ):
    """Print the linked list's elements in reverse order, one per line."""
    # A local helper carries the recursion: the module-level name is rebound by
    # later definitions in this mangled file, so self-recursion by name is unsafe.
    # NOTE(review): the original also guarded with isinstance against the node
    # class; with the class name mangled, recursion here simply stops at None —
    # confirm non-node tails are not expected.
    def _print_reverse(node):
        if node is not None:
            _print_reverse(node.next)
            print(node.data)

    _print_reverse(SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ ( ):
"""Demo driver: run the doctests, build a sample linked list, print it forward and reversed."""
from doctest import testmod
testmod()
# NOTE(review): `make_linked_list`, `print_reverse` and the bare
# `SCREAMING_SNAKE_CASE` used below are not defined under these names in this
# mangled module, so this driver raises NameError as written.
SCREAMING_SNAKE_CASE_ :Dict = make_linked_list([14, 52, 14, 12, 43] )
print('Linked List:' )
print(SCREAMING_SNAKE_CASE )
print('Elements in Reverse:' )
print_reverse(SCREAMING_SNAKE_CASE )
# Script entry point; `main` is likewise undefined in the mangled module.
if __name__ == "__main__":
main()
| 713 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase( lowerCAmelCase__ ):
# PyTorch-specific benchmark arguments; extends the shared BenchmarkArguments
# base class (imported above under a mangled alias).
# Legacy negated CLI flags that __init__ rewrites into their positive forms.
__snake_case : str = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : List[Any] , **SCREAMING_SNAKE_CASE : str ):
"""Translate deprecated `no_*` kwargs into positive flags, pop PyTorch-only options, then delegate to the base class."""
# NOTE(review): the body reads `kwargs`/`positive_arg` but the parameter is
# named `SCREAMING_SNAKE_CASE` and results are bound to the throwaway local
# `SCREAMING_SNAKE_CASE_` — this mangled version raises NameError as written.
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
# Strip the leading "no_" to recover the positive flag name.
SCREAMING_SNAKE_CASE_ :Optional[Any] = deprecated_arg[3:]
setattr(self , SCREAMING_SNAKE_CASE , not kwargs.pop(SCREAMING_SNAKE_CASE ) )
logger.warning(
f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
SCREAMING_SNAKE_CASE_ :List[str] = kwargs.pop('torchscript' , self.torchscript )
SCREAMING_SNAKE_CASE_ :Dict = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
SCREAMING_SNAKE_CASE_ :str = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
super().__init__(**SCREAMING_SNAKE_CASE )
# Whether to trace the model with torchscript.
__snake_case : bool = field(default=lowerCAmelCase__ , metadata={'help': 'Trace the models using torchscript'} )
# Whether to print XLA/PyTorch TPU metrics after the run.
__snake_case : bool = field(default=lowerCAmelCase__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
# Apex AMP optimization level used when fp16 is enabled.
__snake_case : str = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def _lowercase ( self : str ):
"""Resolve the (torch.device, n_gpu) pair for this run: CPU, TPU, or CUDA."""
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
SCREAMING_SNAKE_CASE_ :Tuple = torch.device('cpu' )
SCREAMING_SNAKE_CASE_ :List[str] = 0
elif is_torch_tpu_available():
SCREAMING_SNAKE_CASE_ :Dict = xm.xla_device()
SCREAMING_SNAKE_CASE_ :Tuple = 0
else:
SCREAMING_SNAKE_CASE_ :int = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
SCREAMING_SNAKE_CASE_ :Tuple = torch.cuda.device_count()
# NOTE(review): `device` and `n_gpu` are never bound above in this mangled
# version (assignments went to `SCREAMING_SNAKE_CASE_`), so this line raises
# NameError at evaluation time.
return device, n_gpu
@property
def _lowercase ( self : str ):
"""True when a TPU backend is available and TPU use is requested."""
return is_torch_tpu_available() and self.tpu
@property
def _lowercase ( self : int ):
"""Index of the current CUDA device."""
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def _lowercase ( self : List[str] ):
"""The resolved torch.device (first element of the cached device setup)."""
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def _lowercase ( self : Optional[int] ):
"""Number of GPUs for the run (second element of the cached device setup)."""
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def _lowercase ( self : Dict ):
"""True when at least one GPU is available."""
return self.n_gpu > 0
| 233 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    """Transformer-XL model configuration.

    The mangled original repeated one parameter name for every argument (a
    SyntaxError) and bound values to locals instead of attributes; names and
    defaults are restored to the standard Transformer-XL configuration.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=26_7735,
        cutoffs=[2_0000, 4_0000, 20_0000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1E-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Tie all adaptive-softmax projections except the first cluster.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation.
        # NOTE(review): the getter was mangled to `_A`, which broke the setter
        # decorator below; the property name is restored so the class can be
        # created at all. `logger` is the module logger (its definition line
        # is itself mangled in this file).
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
| 62 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
if number > 0:
raise ValueError("input must be a negative integer" )
UpperCAmelCase_ = len(bin(lowerCAmelCase__ )[3:] )
UpperCAmelCase_ = bin(abs(lowerCAmelCase__ ) - (1 << binary_number_length) )[3:]
UpperCAmelCase_ = (
(
"1"
+ "0" * (binary_number_length - len(lowerCAmelCase__ ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE_ ( ):
    """JIT-compile and import the Deformable-DETR MultiScaleDeformableAttention kernels."""
    from torch.utils.cpp_extension import load

    # Kernel sources live three directory levels above this file under
    # kernels/deformable_detr. The mangled original passed an undefined
    # placeholder to Path(); __file__ restores the intended anchor.
    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
            os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
        ]
    ]
    load(
        "MultiScaleDeformableAttention",
        src_files,
        # NOTE(review): the with_cuda argument was a mangled placeholder; a
        # CUDA build is assumed given the .cu source and flags — confirm.
        with_cuda=True,
        extra_include_paths=[str(root )],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 462 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( box , width , height ):
    """Scale a pixel-space bounding box [x0, y0, x1, y1] into the 0-1000 range.

    The mangled original reused one name for all three parameters, which is a
    SyntaxError; the names are restored from the body's usage.
    """
    return [
        int(1_0_0_0 * (box[0] / width) ),
        int(1_0_0_0 * (box[1] / height) ),
        int(1_0_0_0 * (box[2] / width) ),
        int(1_0_0_0 * (box[3] / height) ),
    ]
def SCREAMING_SNAKE_CASE_ ( image , lang , tesseract_config ):
    """Run Tesseract OCR on `image`; return (words, boxes) with boxes normalized to 0-1000.

    The mangled original reused one name for all three parameters (SyntaxError)
    and bound every local to a throwaway name; names are restored from usage.
    """
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='dict' , config=tesseract_config )
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates; a set makes the
    # repeated membership tests below O(1) instead of O(n).
    irrelevant_indices = {idx for idx, word in enumerate(words ) if not word.strip()}
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_boxes.append([x, y, x + w, y + h] )
    # finally, normalize the bounding boxes. The normalization is inlined here
    # because the sibling helper's name was mangled to collide with this
    # function's own name and cannot be called reliably.
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(
            [
                int(1000 * (box[0] / image_width) ),
                int(1000 * (box[1] / image_height) ),
                int(1000 * (box[2] / image_width) ),
                int(1000 * (box[3] / image_height) ),
            ]
        )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class _lowerCamelCase ( UpperCamelCase_ ):
"""Image processor (LayoutLM-style): resize/rescale/normalize images and optionally run Tesseract OCR.

NOTE(review): in this mangled copy every method signature repeats the single
parameter name `__SCREAMING_SNAKE_CASE`, which is a SyntaxError in Python;
bodies then read the intended names (`size`, `do_resize`, `rescale_value`,
`images`, ...), none of which are bound. The class cannot compile or run as
written — the original parameter lists must be restored upstream.
"""
# Model-input keys produced by preprocess().
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
def __init__( self , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 1 / 2_5_5 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "" , **__SCREAMING_SNAKE_CASE , ) -> None:
"""Store the default preprocessing configuration (size, resample, rescale, normalize, OCR options)."""
super().__init__(**__SCREAMING_SNAKE_CASE )
# Default to 224x224 when no size is supplied.
UpperCamelCase__ : Optional[Any] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
UpperCamelCase__ : Optional[int] = get_size_dict(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = do_resize
UpperCamelCase__ : Optional[int] = size
UpperCamelCase__ : Optional[Any] = resample
UpperCamelCase__ : Optional[Any] = do_rescale
# NOTE(review): `rescale_value` is not among the intended parameter names of
# similar processors (usually `rescale_factor`) — confirm against the caller.
UpperCamelCase__ : str = rescale_value
UpperCamelCase__ : Any = do_normalize
UpperCamelCase__ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
UpperCamelCase__ : Dict = apply_ocr
UpperCamelCase__ : List[Any] = ocr_lang
UpperCamelCase__ : str = tesseract_config
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""Resize an image to the (height, width) given in `size`."""
UpperCamelCase__ : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
UpperCamelCase__ : int = (size['''height'''], size['''width'''])
return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""Rescale pixel values by a scalar factor (e.g. 1/255)."""
return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""Normalize an image with the given per-channel mean and std."""
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image:
"""Full pipeline: validate inputs, optionally run OCR, then resize/rescale/normalize and batch."""
# Per-call overrides fall back to the instance defaults set in __init__.
UpperCamelCase__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ : List[str] = size if size is not None else self.size
UpperCamelCase__ : Dict = get_size_dict(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = resample if resample is not None else self.resample
UpperCamelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ : str = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ : Tuple = image_std if image_std is not None else self.image_std
UpperCamelCase__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCamelCase__ : List[str] = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCamelCase__ : List[Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCamelCase__ : List[Any] = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
UpperCamelCase__ : List[Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
UpperCamelCase__ : Optional[int] = []
UpperCamelCase__ : List[Any] = []
for image in images:
UpperCamelCase__ ,UpperCamelCase__ : Tuple = apply_tesseract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
words_batch.append(__SCREAMING_SNAKE_CASE )
boxes_batch.append(__SCREAMING_SNAKE_CASE )
if do_resize:
UpperCamelCase__ : str = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
UpperCamelCase__ : List[Any] = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
UpperCamelCase__ : str = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images]
UpperCamelCase__ : int = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
UpperCamelCase__ : List[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=__SCREAMING_SNAKE_CASE )
# Attach OCR output alongside pixel values when OCR was requested.
if apply_ocr:
UpperCamelCase__ : str = words_batch
UpperCamelCase__ : Union[str, Any] = boxes_batch
return data
| 462 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 453 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def SCREAMING_SNAKE_CASE ( lowercase__ ) -> Optional[Any]:
    """Convert a locally stored temporal U-Net checkpoint (horizon `lowercase__`,
    32 or 128) to the diffusers UNet1D layout; save weights and config under
    hub/hopper-medium-v2/unet/hor{horizon}.

    The mangled original bound every local to a throwaway name and then read
    the intended identifiers (NameError); the names are restored from usage.
    """
    if lowercase__ == 1_2_8:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (3_2, 1_2_8, 2_5_6)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif lowercase__ == 3_2:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (3_2, 6_4, 1_2_8, 2_5_6)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{lowercase__}.torch""" )
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 1_4,
        "out_channels": 1_4,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 6_5_5_3_6,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNetaDModel(**config )
    print(F"""length of state dict: {len(state_dict.keys() )}""" )
    print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
    # Rename the checkpoint's keys positionally onto the diffusers model's keys.
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{lowercase__}/diffusion_pytorch_model.bin""" )
    with open(F"""hub/hopper-medium-v2/unet/hor{lowercase__}/config.json""" , "w" ) as f:
        json.dump(config , f )
def SCREAMING_SNAKE_CASE ( ) -> Any:
    """Convert the locally stored value-function checkpoint to the diffusers
    UNet1D layout; save weights and config under hub/hopper-medium-v2/value_function.

    The mangled original bound every local to a throwaway name and then read
    the intended identifiers (NameError); the names are restored from usage.
    """
    config = {
        "in_channels": 1_4,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (3_2, 6_4, 1_2_8, 2_5_6),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 6_5_5_3_6,
        "out_channels": 1_4,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
    # The value-function checkpoint is already a plain state dict.
    state_dict = model
    hf_value_function = UNetaDModel(**config )
    print(F"""length of state dict: {len(state_dict.keys() )}""" )
    print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
    # Rename the checkpoint's keys positionally onto the diffusers model's keys.
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
    with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
| 453 | 1 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def _SCREAMING_SNAKE_CASE ( ) -> tuple[list[int], int]:
    """Build a random 10-element dataset and a random target for the triplet-sum demos."""
    # The mangled original bound both values to a throwaway name and returned
    # the undefined `arr`/`r` (NameError).
    arr = [randint(-1_000 , 1_000 ) for _ in range(10 )]
    r = randint(-5_000 , 5_000 )
    return (arr, r)
_UpperCAmelCase = make_dataset()
def _SCREAMING_SNAKE_CASE ( arr: list[int] , target: int ) -> tuple[int, ...]:
    """Brute force: return the sorted triplet of `arr` summing to `target`, else (0, 0, 0).

    The mangled original reused one name for both parameters (SyntaxError);
    the names are restored from the body's usage.
    """
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def _SCREAMING_SNAKE_CASE ( arr: list[int] , target: int ) -> tuple[int, int, int]:
    """Two-pointer O(n^2) triplet sum; note this sorts `arr` in place.

    The mangled original reused one name for both parameters (SyntaxError) and
    never bound `n`/`left`/`right`; the names are restored from usage.
    """
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left, right = i + 1, n - 1
        while left < right:
            # Hoist the triple sum: the original recomputed it up to three times.
            current = arr[i] + arr[left] + arr[right]
            if current == target:
                return (arr[i], arr[left], arr[right])
            elif current < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)
def _SCREAMING_SNAKE_CASE ( ) -> tuple[float, float]:
    """Time both triplet-sum implementations and return the best-of-5 runtimes."""
    # NOTE(review): the setup snippet imports names this mangled module no
    # longer defines; the strings are kept verbatim since they only execute
    # when this block is actually timed.
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    # The mangled original bound these to a throwaway name and then read the
    # undefined intended names (NameError).
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10_000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10_000 )
    return (min(times1 ), min(times2 ))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # The original called the undefined `solution_times` and then read the
    # undefined `times`; the mangled module's actual function name is used.
    times = _SCREAMING_SNAKE_CASE()
    print(f'''The time for naive implementation is {times[0]}.''')
    print(f'''The time for optimized implementation is {times[1]}.''')
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int = 100 ) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of
    squares of 1..n (default n = 100).
    """
    # The mangled original read an undefined `n` and unbound locals; closed-form
    # formulas over the parameter are used instead.
    sum_of_squares = SCREAMING_SNAKE_CASE * (SCREAMING_SNAKE_CASE + 1) * (2 * SCREAMING_SNAKE_CASE + 1) / 6
    square_of_sum = (SCREAMING_SNAKE_CASE * (SCREAMING_SNAKE_CASE + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )


if __name__ == "__main__":
    # The original printed `solution()`, a name this mangled module never defines.
    print(f'''{_SCREAMING_SNAKE_CASE() = }''')
'''simple docstring'''
import math
from collections.abc import Callable
def __snake_case ( function: Callable[[float], float] , x0: float , x1: float ) -> float:
    """Secant method: return an approximate root of `function` starting from x0, x1.

    The mangled original reused one name for the two float parameters
    (SyntaxError) and never bound the `x_n` iterates; names restored from usage.

    Raises:
        ZeroDivisionError: when the secant slope vanishes before convergence.
    """
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError("float division by zero, could not find root" )
        # Standard secant update: x_{n+2} = x_{n+1} - f(x_{n+1}) / slope.
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n ))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def __snake_case ( x: float ) -> float:
    """Demo polynomial f(x) = x^3 - 2x - 5 whose root the secant method finds.

    The mangled original read an undefined `x`; the parameter is used instead.
    """
    return math.pow(x , 3 ) - (2 * x) - 5


# NOTE(review): `intersection` and `f` are not defined under these names in
# this mangled module (both defs were renamed to `__snake_case`, the second
# shadowing the first), so this entry point raises NameError as written.
if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
| 675 |
'''simple docstring'''
def __snake_case ( ):
    """Project Euler 19: count Sundays falling on the first of a month, 1901-2000.

    The mangled original bound every local to a throwaway name and then read
    the intended identifiers (NameError); names restored from the body's usage.
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # 1 Jan 1901 offset: the day counter is advanced by 7 each iteration and
    # compared against month lengths to roll months/years forward.
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # Leap year: February has 29 days.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    # The original printed `solution()`, a name this mangled module never defines.
    print(__snake_case())
| 675 | 1 |
'''simple docstring'''
from math import isqrt, loga
def snake_case_ ( _lowerCAmelCase : int ) -> list[int]:
    """Sieve of Eratosthenes: return all primes strictly below `_lowerCAmelCase`."""
    # Local import: the module-level `from math import isqrt, loga` line is
    # broken in this mangled file (`loga` does not exist in math).
    from math import isqrt

    # Guard small limits; the original crashed on inputs below 2.
    if _lowerCAmelCase < 2:
        return []
    is_prime = [True] * _lowerCAmelCase
    for i in range(2 , isqrt(_lowerCAmelCase - 1 ) + 1 ):
        if is_prime[i]:
            # Step by `i`: the mangled original stepped by the sieve size,
            # which marked only i*i and missed every other multiple.
            for j in range(i**2 , _lowerCAmelCase , i ):
                is_prime[j] = False
    return [i for i in range(2 , _lowerCAmelCase ) if is_prime[i]]
def snake_case_ ( base: int = 800800 , degree: int = 800800 ) -> int:
    """Project Euler 800: count hybrid integers p^q * q^p (primes p < q) not
    exceeding base^degree.

    The mangled original reused one name for both parameters (SyntaxError) and
    never bound its locals; names are restored from the body's usage.
    """
    # Local import: the module-level `from math import isqrt, loga` line is
    # broken in this mangled file (`loga` was presumably log2).
    from math import isqrt, log2

    # Work in log2 space: p^q * q^p <= base^degree
    # <=> q*log2(p) + p*log2(q) <= degree*log2(base).
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    # Inline sieve of Eratosthenes; the sibling sieve helper's name was mangled
    # to collide with this function's own, so it cannot be called reliably.
    is_prime = [True] * max_prime
    for i in range(2 , isqrt(max_prime - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i * i , max_prime , i ):
                is_prime[j] = False
    prime_numbers = [i for i in range(2 , max_prime ) if is_prime[i]]
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    # Two-pointer scan over the sorted primes: for each `left`, shrink `right`
    # until the pair satisfies the bound, then all pairs in between count.
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count


if __name__ == "__main__":
    # The original printed `solution()`, a name this mangled module never defines.
    print(F"{snake_case_() = }")
| 718 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCamelCase__: List[Any] = logging.getLogger(__name__)
def snake_case_ ( ) -> argparse.Namespace:
    """Parse CLI arguments for preparing TFRecord shards from the wikitext dataset.

    The mangled original never bound `parser`/`args` and passed an undefined
    placeholder as `type=`; string/int types are restored from the defaults.
    """
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
    parser.add_argument(
        '--dataset_name' , type=str , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
    parser.add_argument(
        '--dataset_config' , type=str , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
    parser.add_argument(
        '--tokenizer_name_or_path' , type=str , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
    parser.add_argument(
        '--shard_size' , type=int , default=1000 , help='Number of entries to go in a single shard.' , )
    parser.add_argument('--split' , type=str , default='train' , choices=['train', 'test', 'validation'] )
    parser.add_argument(
        '--limit' , default=None , type=int , help='Limit the number of shards (used for debugging).' , )
    parser.add_argument(
        '--max_length' , type=int , default=512 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.' , )
    parser.add_argument(
        '--output_dir' , default='tf-tpu' , type=str , help='Output directory where the TFRecord shards will be saved. If the'
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        ' shards will be directly saved to a Google Cloud Storage bucket.' , )
    args = parser.parse_args()
    return args
def snake_case_ ( _lowerCAmelCase ):
    """Return a datasets-style map function that tokenizes the 'text' column.

    `_lowerCAmelCase` is the tokenizer; the mangled original's closure read an
    undefined `tokenizer` instead of closing over this parameter.
    """
    def fn(examples):
        return _lowerCAmelCase(examples['text'] )

    return fn
def snake_case_ ( _lowerCAmelCase ):
    """Serialize each tokenized example into a tf.train.Example byte string.

    `_lowerCAmelCase` is a mapping with 'input_ids' and 'attention_mask'
    parallel lists. The mangled original bound intermediates to a throwaway
    name and used digit-mangled `intaa_list`/`IntaaList` for
    `int64_list`/`Int64List`.
    """
    records = []
    for i in range(len(_lowerCAmelCase['input_ids'] ) ):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=_lowerCAmelCase['input_ids'][i] ) ),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=_lowerCAmelCase['attention_mask'][i] ) ),
        }
        example = tf.train.Example(features=tf.train.Features(feature=features ) )
        records.append(example.SerializeToString() )
    return records
def snake_case_ ( _lowerCAmelCase : List[str] ) -> List[str]:
"""Main driver: load/limit the dataset, tokenize, group into fixed-length chunks, and write TFRecord shards.

NOTE(review): throughout this function the mangled source binds results to the
throwaway local `UpperCAmelCase` but then reads the intended names (`dataset`,
`tokenizer`, `split_dir`, `dataset_tokenized`, `grouped_dataset`,
`shard_count`, `total_records`, `serialized_examples`, ...), so it raises
NameError as written. `_lowerCAmelCase` is presumably the parsed CLI args —
confirm against the entry point.
"""
UpperCAmelCase : List[str] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
UpperCAmelCase : List[Any] = min(len(_lowerCAmelCase ) , args.limit )
UpperCAmelCase : Dict = dataset.select(range(_lowerCAmelCase ) )
print(f"""Limiting the dataset to {args.limit} entries.""" )
UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
UpperCAmelCase : List[Any] = os.path.join(args.output_dir , args.split )
if not os.path.exists(_lowerCAmelCase ):
os.makedirs(_lowerCAmelCase )
else:
UpperCAmelCase : Tuple = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
UpperCAmelCase : Optional[Any] = tokenize_function(_lowerCAmelCase )
UpperCAmelCase : int = dataset.map(_lowerCAmelCase , batched=_lowerCAmelCase , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_lowerCAmelCase : List[str] ):
# Concatenate all texts.
UpperCAmelCase : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
UpperCAmelCase : Optional[Any] = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
UpperCAmelCase : Any = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
UpperCAmelCase : List[str] = {
k: [t[i : i + args.max_length] for i in range(0 , _lowerCAmelCase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
UpperCAmelCase : str = dataset_tokenized.map(_lowerCAmelCase , batched=_lowerCAmelCase , batch_size=1000 , num_proc=4 )
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : Dict = 0
# Write the grouped examples out in shards of args.shard_size records each.
for shard in range(0 , len(_lowerCAmelCase ) , args.shard_size ):
UpperCAmelCase : Union[str, Any] = grouped_dataset[shard : shard + args.shard_size]
UpperCAmelCase : Optional[int] = len(dataset_snapshot['''input_ids'''] )
UpperCAmelCase : Dict = os.path.join(_lowerCAmelCase , f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
UpperCAmelCase : List[Any] = get_serialized_examples(_lowerCAmelCase )
with tf.io.TFRecordWriter(_lowerCAmelCase ) as out_file:
for i in range(len(_lowerCAmelCase ) ):
UpperCAmelCase : Any = serialized_examples[i]
out_file.write(_lowerCAmelCase )
print('''Wrote file {} containing {} records'''.format(_lowerCAmelCase , _lowerCAmelCase ) )
shard_count += 1
total_records += records_containing
# Record the total number of examples written for this split.
with open(f"""split-{args.split}-records-count.txt""" , '''w''' ) as f:
print(f"""Total {args.split} records: {total_records}""" , file=_lowerCAmelCase )
if __name__ == "__main__":
    # Bug fix: the guard previously bound the parsed options to a throwaway
    # name and then called the undefined ``main(args)``. The entry point
    # defined in this module is ``snake_case_``; bind ``args`` at module level
    # since the function body also reads it as a global.
    args = parse_args()
    snake_case_(args)
| 528 | 0 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Sub-modules of this package and the public names they export; consumed by
# ``_LazyModule`` below so the heavy imports only happen on first attribute
# access.
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    # Bug fix: the eager (type-checking) import previously targeted a module
    # and class name inconsistent with ``_import_structure``.
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    # Bug fixes: the structure dict was bound to a throwaway name while
    # ``_import_structure`` (referenced here) was undefined, and the lazy
    # module was assigned to a local instead of being installed as this
    # module in ``sys.modules``.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def _snake_case( SCREAMING_SNAKE_CASE__ = 4_000_000 ) -> int:
lowercase : List[str] = [0, 1]
lowercase : str = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowercase : Optional[Any] = 0
for j in range(len(SCREAMING_SNAKE_CASE__ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
    # Bug fix: this module defines ``_snake_case`` only; the previous
    # ``solution()`` call referenced an undefined name and raised NameError.
    print(F'''{_snake_case() = }''')
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCAmelCase ( unittest.TestCase ):
    """Pipeline tests for video classification (VideoMAE tiny checkpoint).

    NOTE(review): all three test methods below share the name ``__A`` (each
    definition shadows the previous one), every local is rebound to ``A_``
    while later reads use the original names (``example_video_filepath``,
    ``video_classifier``, ``examples``, ``video_file_path``), and parameter
    lists repeat ``_SCREAMING_SNAKE_CASE`` — a SyntaxError. Restore the
    original identifiers before running this test module.
    """

    # Presumably the model-mapping used to parametrize the pipeline tests.
    __lowercase : List[Any] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    # Builds a pipeline plus two example inputs (local file and remote URL).
    def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
        A_ = hf_hub_download(
            repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        A_ = VideoClassificationPipeline(model=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , top_k=2 )
        A_ = [
            example_video_filepath,
            '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
        ]
        return video_classifier, examples

    # Each example should yield a list of top-2 {score, label} dicts.
    def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
        for example in examples:
            A_ = video_classifier(_SCREAMING_SNAKE_CASE )
            self.assertEqual(
                _SCREAMING_SNAKE_CASE , [
                    {'''score''': ANY(_SCREAMING_SNAKE_CASE ), '''label''': ANY(_SCREAMING_SNAKE_CASE )},
                    {'''score''': ANY(_SCREAMING_SNAKE_CASE ), '''label''': ANY(_SCREAMING_SNAKE_CASE )},
                ] , )

    # End-to-end check against a tiny random VideoMAE checkpoint (PyTorch only).
    @require_torch
    def __A ( self ) -> Tuple:
        A_ = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
        A_ = VideoMAEFeatureExtractor(
            size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
        A_ = pipeline(
            '''video-classification''' , model=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , frame_sampling_rate=4 )
        A_ = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        A_ = video_classifier(_SCREAMING_SNAKE_CASE , top_k=2 )
        self.assertEqual(
            nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , )
        A_ = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
                [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
                [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
            ] , )

    # TensorFlow variant intentionally not implemented for this checkpoint.
    @require_tf
    def __A ( self ) -> Union[str, Any]:
        pass
| 174 | '''simple docstring'''
def _UpperCAmelCase ( _UpperCamelCase : int ) -> bool:
if not isinstance(_UpperCamelCase, _UpperCamelCase ):
A_ = F'''Input value of [number={number}] must be an integer'''
raise TypeError(_UpperCamelCase )
if number < 0:
return False
A_ = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest

    doctest.testmod()
| 174 | 1 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__UpperCamelCase = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
__UpperCamelCase = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
__UpperCamelCase = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> dict[str, int]:
    """Count occurrences of each uppercase letter A-Z in the message.

    Non-letters are ignored; lower-case letters are counted via upper-casing.
    """
    # Bug fixes: the original iterated the undefined name ``message`` and
    # incremented the undefined name ``letter_count``.
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in SCREAMING_SNAKE_CASE_.upper():
        # Membership in the count dict restricts the tally to A-Z.
        if letter in letter_count:
            letter_count[letter] += 1
    return letter_count
def lowercase (SCREAMING_SNAKE_CASE_ : tuple ) -> str:
    """Sort-key helper: return the first element of the pair."""
    # Bug fix: the original returned ``x[0]`` where ``x`` was undefined.
    return SCREAMING_SNAKE_CASE_[0]
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> str:
    """Return the 26 letters ordered from most to least frequent in the message.

    Ties are broken by placing the letter that is *least* common in English
    first within the tie group (the standard frequency-analysis ordering).
    """
    # Local ordering constant so this function resolves its own names.
    etaoin = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
    # Bug fixes: the original called an undefined helper, read locals under
    # names that were never assigned, passed the message string as the
    # ``reverse=`` flag, and used the (uncallable) message as a sort ``key=``.
    # Count each uppercase letter in the message.
    letter_to_freq = {letter: 0 for letter in string.ascii_uppercase}
    for ch in SCREAMING_SNAKE_CASE_.upper():
        if ch in letter_to_freq:
            letter_to_freq[ch] += 1
    # Bucket the letters by observed frequency.
    freq_to_letter = {freq: [] for freq in letter_to_freq.values()}
    for letter in string.ascii_uppercase:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    # Within each bucket, order letters from least to most common in English.
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=etaoin.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    # Highest observed frequency first.
    freq_pairs = sorted(freq_to_letter_str.items(), key=lambda pair: pair[0], reverse=True)
    return "".join(pair[1] for pair in freq_pairs)
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> int:
    """Score (0-12) how closely the message's letter frequencies match English.

    NOTE(review): as written this raises NameError — ``get_frequency_order``
    is not defined in this module (every function here was rebound to a single
    shared name), and the locals ``freq_order``/``match_score`` are read but
    never assigned under those names. Restore the original identifiers before
    relying on this function.
    """
    SCREAMING_SNAKE_CASE = get_frequency_order(SCREAMING_SNAKE_CASE_ )
    SCREAMING_SNAKE_CASE = 0
    # One point for each of the six most-frequent English letters that appears
    # among the message's six most frequent letters...
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    # ...and likewise for the six least-frequent letters.
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest

    doctest.testmod()
| 247 |
"""simple docstring"""
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> bool:
    """Return True if the string is an isogram (no repeated letters).

    The comparison is case-insensitive.

    Raises:
        ValueError: if the string contains non-alphabetic characters.
    """
    # Bug fixes: the original iterated the undefined name ``string`` instead
    # of the parameter, and compared the raw parameter rather than the
    # lower-cased copy (making the check accidentally case-sensitive).
    if not all(ch.isalpha() for ch in SCREAMING_SNAKE_CASE_):
        raise ValueError('String must only contain alphabetic characters.')
    lowered = sorted(SCREAMING_SNAKE_CASE_.lower())
    return len(lowered) == len(set(lowered))
if __name__ == "__main__":
    # Interactive demo: read a string and report whether it is an isogram.
    # NOTE(review): ``is_isogram``, ``input_str`` and ``isogram`` are not
    # defined in this module (the checker above is bound as ``lowercase`` and
    # both results are assigned to a throwaway name); this demo raises
    # NameError as written.
    __UpperCamelCase = input('''Enter a string ''').strip()
    __UpperCamelCase = is_isogram(input_str)
    print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
| 247 | 1 |
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __A ( SCREAMING_SNAKE_CASE_ ):
    """Processor wrapping a SAM image processor: preprocesses images and
    normalizes prompt inputs (points / labels / boxes) into model tensors.

    NOTE(review): identifiers in this class were mechanically rewritten — the
    two class attributes and every helper method rebind single shared names
    (``UpperCAmelCase__`` / ``lowerCamelCase__``), locals and instance
    attributes are written to ``__magic_name__`` while later reads use the
    original names (``encoding_image_processor``, ``original_sizes``,
    ``input_points``, ``coords``, ...), and several ``def`` headers repeat the
    parameter ``__snake_case`` — a SyntaxError. Restore the original
    identifiers before use.
    """

    UpperCAmelCase__ = ["image_processor"]
    UpperCAmelCase__ = "SamImageProcessor"

    # Presumably stores the wrapped processor, the pad value for ragged point
    # lists, and the resize target — the assignments below never reach ``self``.
    def __init__( self : str , __snake_case : Union[str, Any] ) -> str:
        super().__init__(__snake_case )
        __magic_name__: List[Any] = self.image_processor
        __magic_name__: Optional[int] = -1_0
        __magic_name__: Dict = self.image_processor.size["""longest_edge"""]

    # Preprocess the image(s), then validate and normalize the prompt inputs.
    def __call__( self : List[Any] , __snake_case : Tuple=None , __snake_case : Any=None , __snake_case : Optional[Any]=None , __snake_case : str=None , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Dict , ) -> BatchEncoding:
        __magic_name__: Optional[int] = self.image_processor(
            __snake_case , return_tensors=__snake_case , **__snake_case , )
        # pop arguments that are not used in the foward but used nevertheless
        __magic_name__: int = encoding_image_processor["""original_sizes"""]
        if hasattr(__snake_case , """numpy""" ): # Checks if Torch or TF tensor
            __magic_name__: Optional[Any] = original_sizes.numpy()
        __magic_name__, __magic_name__, __magic_name__: Any = self._check_and_preprocess_points(
            input_points=__snake_case , input_labels=__snake_case , input_boxes=__snake_case , )
        __magic_name__: Optional[int] = self._normalize_and_convert(
            __snake_case , __snake_case , input_points=__snake_case , input_labels=__snake_case , input_boxes=__snake_case , return_tensors=__snake_case , )
        return encoding_image_processor

    # Rescale points/boxes to the resized image and convert them to tensors of
    # the requested framework ("pt" or "tf").
    def lowerCamelCase__ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : Any=None , __snake_case : Dict=None , __snake_case : Union[str, Any]=None , __snake_case : Union[str, Any]="pt" , ) -> Any:
        if input_points is not None:
            if len(__snake_case ) != len(__snake_case ):
                # Fewer original sizes than point lists: reuse the first size.
                __magic_name__: str = [
                    self._normalize_coordinates(self.target_size , __snake_case , original_sizes[0] ) for point in input_points
                ]
            else:
                __magic_name__: List[str] = [
                    self._normalize_coordinates(self.target_size , __snake_case , __snake_case )
                    for point, original_size in zip(__snake_case , __snake_case )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    __magic_name__, __magic_name__: Tuple = self._pad_points_and_labels(__snake_case , __snake_case )
            __magic_name__: Tuple = np.array(__snake_case )
        if input_labels is not None:
            __magic_name__: List[Any] = np.array(__snake_case )
        if input_boxes is not None:
            if len(__snake_case ) != len(__snake_case ):
                __magic_name__: List[str] = [
                    self._normalize_coordinates(self.target_size , __snake_case , original_sizes[0] , is_bounding_box=__snake_case )
                    for box in input_boxes
                ]
            else:
                __magic_name__: List[Any] = [
                    self._normalize_coordinates(self.target_size , __snake_case , __snake_case , is_bounding_box=__snake_case )
                    for box, original_size in zip(__snake_case , __snake_case )
                ]
            __magic_name__: int = np.array(__snake_case )
        if input_boxes is not None:
            if return_tensors == "pt":
                __magic_name__: Union[str, Any] = torch.from_numpy(__snake_case )
                # boxes batch size of 1 by default
                __magic_name__: Union[str, Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                __magic_name__: List[Any] = tf.convert_to_tensor(__snake_case )
                # boxes batch size of 1 by default
                __magic_name__: Optional[Any] = tf.expand_dims(__snake_case , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"""input_boxes""": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                __magic_name__: Union[str, Any] = torch.from_numpy(__snake_case )
                # point batch size of 1 by default
                __magic_name__: Optional[int] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                __magic_name__: Dict = tf.convert_to_tensor(__snake_case )
                # point batch size of 1 by default
                __magic_name__: Union[str, Any] = tf.expand_dims(__snake_case , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"""input_points""": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                __magic_name__: Union[str, Any] = torch.from_numpy(__snake_case )
                # point batch size of 1 by default
                __magic_name__: Optional[Any] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                __magic_name__: Union[str, Any] = tf.convert_to_tensor(__snake_case )
                # point batch size of 1 by default
                __magic_name__: int = tf.expand_dims(__snake_case , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"""input_labels""": input_labels} )
        return encoding_image_processor

    # Pad ragged point lists (and their labels) to a common length with the
    # pad value so they can be stacked into one array.
    def lowerCamelCase__ ( self : List[str] , __snake_case : Tuple , __snake_case : Dict ) -> Optional[int]:
        __magic_name__: Union[str, Any] = max([point.shape[0] for point in input_points] )
        __magic_name__: Any = []
        for i, point in enumerate(__snake_case ):
            if point.shape[0] != expected_nb_points:
                __magic_name__: Optional[Any] = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                __magic_name__: str = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(__snake_case )
        __magic_name__: str = processed_input_points
        return input_points, input_labels

    # Scale (x, y) coordinates from the original image size to the resized
    # size; boxes are temporarily viewed as two corner points.
    def lowerCamelCase__ ( self : Tuple , __snake_case : int , __snake_case : np.ndarray , __snake_case : Tuple , __snake_case : List[str]=False ) -> np.ndarray:
        __magic_name__, __magic_name__: Any = original_size
        __magic_name__, __magic_name__: Tuple = self.image_processor._get_preprocess_shape(__snake_case , longest_edge=__snake_case )
        __magic_name__: List[str] = deepcopy(__snake_case ).astype(__snake_case )
        if is_bounding_box:
            __magic_name__: List[str] = coords.reshape(-1 , 2 , 2 )
        __magic_name__: str = coords[..., 0] * (new_w / old_w)
        __magic_name__: int = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            __magic_name__: str = coords.reshape(-1 , 4 )
        return coords

    # Validate the raw prompt inputs (accepting framework tensors or nested
    # lists) and convert them to lists of numpy arrays.
    def lowerCamelCase__ ( self : int , __snake_case : Optional[Any]=None , __snake_case : Optional[int]=None , __snake_case : int=None , ) -> Dict:
        if input_points is not None:
            if hasattr(__snake_case , """numpy""" ): # Checks for TF or Torch tensor
                __magic_name__: Union[str, Any] = input_points.numpy().tolist()
            if not isinstance(__snake_case , __snake_case ) or not isinstance(input_points[0] , __snake_case ):
                raise ValueError("""Input points must be a list of list of floating points.""" )
            __magic_name__: Dict = [np.array(__snake_case ) for input_point in input_points]
        else:
            __magic_name__: str = None
        if input_labels is not None:
            if hasattr(__snake_case , """numpy""" ):
                __magic_name__: Optional[int] = input_labels.numpy().tolist()
            if not isinstance(__snake_case , __snake_case ) or not isinstance(input_labels[0] , __snake_case ):
                raise ValueError("""Input labels must be a list of list integers.""" )
            __magic_name__: Tuple = [np.array(__snake_case ) for label in input_labels]
        else:
            __magic_name__: str = None
        if input_boxes is not None:
            if hasattr(__snake_case , """numpy""" ):
                __magic_name__: Tuple = input_boxes.numpy().tolist()
            if (
                not isinstance(__snake_case , __snake_case )
                or not isinstance(input_boxes[0] , __snake_case )
                or not isinstance(input_boxes[0][0] , __snake_case )
            ):
                raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
            __magic_name__: List[Any] = [np.array(__snake_case ).astype(np.floataa ) for box in input_boxes]
        else:
            __magic_name__: List[str] = None
        return input_points, input_labels, input_boxes

    # De-duplicated model input names of the wrapped image processor.
    @property
    def lowerCamelCase__ ( self : Optional[int] ) -> Any:
        __magic_name__: int = self.image_processor.model_input_names
        return list(dict.fromkeys(__snake_case ) )

    # Delegate mask post-processing to the wrapped image processor.
    def lowerCamelCase__ ( self : Any , *__snake_case : str , **__snake_case : Union[str, Any] ) -> Optional[Any]:
        return self.image_processor.post_process_masks(*__snake_case , **__snake_case )
| 213 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)

# NOTE(review): every module-level constant below is bound to the same name
# ``__lowerCamelCase`` (each assignment clobbers the previous one), while the
# tokenizer class reads the original identifiers (SPIECE_UNDERLINE,
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, FAIRSEQ_LANGUAGE_CODES, ``logger``).
# Restore the distinct constant names before using this module.
__lowerCamelCase = '▁'

__lowerCamelCase = {'vocab_file': 'sentencepiece.bpe.model'}

__lowerCamelCase = {
    'vocab_file': {
        'facebook/mbart-large-50-one-to-many-mmt': (
            'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
        ),
    }
}

__lowerCamelCase = {
    'facebook/mbart-large-50-one-to-many-mmt': 10_24,
}

# fmt: off
__lowerCamelCase = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class __A ( SCREAMING_SNAKE_CASE_ ):
    """MBart-50 tokenizer built on SentencePiece, with fairseq-style id offsets
    and per-language prefix tokens.

    NOTE(review): identifiers in this class were mechanically rewritten — the
    class attributes all rebind the single name ``UpperCAmelCase__`` and read
    module constants that are no longer defined under those names, every
    method is bound as ``lowerCamelCase__`` (later definitions shadow earlier
    ones, and the ``@src_lang.setter`` decorator references an undefined
    name), and locals/attributes are written to ``__magic_name__`` while read
    back under their original names. Restore the original identifiers before
    use.
    """

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = ["input_ids", "attention_mask"]
    UpperCAmelCase__ = []
    UpperCAmelCase__ = []

    # Load the SentencePiece model, register the language codes as extra
    # special tokens, and build the fairseq<->spm id alignment tables.
    def __init__( self : int , __snake_case : str , __snake_case : Tuple=None , __snake_case : Dict=None , __snake_case : Union[str, Any]="</s>" , __snake_case : int="</s>" , __snake_case : int="<s>" , __snake_case : Tuple="<unk>" , __snake_case : List[str]="<pad>" , __snake_case : Tuple="<mask>" , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : List[Any] , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        __magic_name__: Union[str, Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
        __magic_name__: List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
        __magic_name__: str = kwargs.get("""additional_special_tokens""" , [] )
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=__snake_case , tgt_lang=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
        __magic_name__: str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__snake_case ) )
        __magic_name__: Optional[Any] = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        __magic_name__: str = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        __magic_name__: List[Any] = 1
        __magic_name__: List[Any] = len(self.sp_model )
        __magic_name__: Union[str, Any] = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__snake_case )
        }
        __magic_name__: Any = {v: k for k, v in self.lang_code_to_id.items()}
        __magic_name__: Optional[int] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        __magic_name__: Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        __magic_name__: Any = src_lang if src_lang is not None else """en_XX"""
        __magic_name__: Dict = self.lang_code_to_id[self._src_lang]
        __magic_name__: Optional[Any] = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    # Appears to be the ``vocab_size`` property.
    @property
    def lowerCamelCase__ ( self : List[str] ) -> int:
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token

    # Appears to be the ``src_lang`` property.
    @property
    def lowerCamelCase__ ( self : Optional[Any] ) -> str:
        return self._src_lang

    # Appears to be the ``src_lang`` setter; also refreshes the special tokens.
    @src_lang.setter
    def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : str ) -> None:
        __magic_name__: int = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    # Drop the unpicklable SentencePiece processor when pickling.
    def __getstate__( self : str ) -> Dict:
        __magic_name__: int = self.__dict__.copy()
        __magic_name__: List[str] = None
        return state

    # Recreate the SentencePiece processor after unpickling.
    def __setstate__( self : Any , __snake_case : Dict ) -> None:
        __magic_name__: List[Any] = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            __magic_name__: Optional[Any] = {}
        __magic_name__: List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    # Appears to be ``get_vocab``: full token -> id mapping, including added tokens.
    def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
        __magic_name__: List[Any] = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    # Appears to be ``_tokenize`` via the SentencePiece model.
    def lowerCamelCase__ ( self : List[str] , __snake_case : str ) -> List[str]:
        return self.sp_model.encode(__snake_case , out_type=__snake_case )

    # Appears to be ``_convert_token_to_id`` with the fairseq offset applied.
    def lowerCamelCase__ ( self : int , __snake_case : str ) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        __magic_name__: Optional[Any] = self.sp_model.PieceToId(__snake_case )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    # Appears to be ``_convert_id_to_token`` (inverse of the above).
    def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : int ) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    # Appears to be ``convert_tokens_to_string``: decode pieces while keeping
    # special tokens out of the SentencePiece decoder.
    def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Optional[int] ) -> Union[str, Any]:
        __magic_name__: str = []
        __magic_name__: Dict = """"""
        __magic_name__: Optional[Any] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__snake_case ) + token
                __magic_name__: Dict = True
                __magic_name__: Optional[Any] = []
            else:
                current_sub_tokens.append(__snake_case )
                __magic_name__: Union[str, Any] = False
        out_string += self.sp_model.decode(__snake_case )
        return out_string.strip()

    # Appears to be ``save_vocabulary``: copy or re-serialize the spm model file.
    def lowerCamelCase__ ( self : Optional[Any] , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(__snake_case ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        __magic_name__: Optional[int] = os.path.join(
            __snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __snake_case )
        elif not os.path.isfile(self.vocab_file ):
            with open(__snake_case , """wb""" ) as fi:
                __magic_name__: str = self.sp_model.serialized_model_proto()
                fi.write(__snake_case )
        return (out_vocab_file,)

    # Appears to be ``get_special_tokens_mask`` for the prefix/suffix layout.
    def lowerCamelCase__ ( self : str , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
        __magic_name__: List[Any] = [1] * len(self.prefix_tokens )
        __magic_name__: Tuple = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
        return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones

    # Appears to be ``build_inputs_with_special_tokens``.
    def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    # Appears to be ``_build_translation_inputs`` for generation.
    def lowerCamelCase__ ( self : Any , __snake_case : Dict , __snake_case : str , __snake_case : Optional[str] , __snake_case : Optional[str] , **__snake_case : Tuple ) -> str:
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        __magic_name__: Union[str, Any] = src_lang
        __magic_name__: int = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
        __magic_name__: Union[str, Any] = self.convert_tokens_to_ids(__snake_case )
        __magic_name__: int = tgt_lang_id
        return inputs

    # Appears to be ``prepare_seq2seq_batch``.
    def lowerCamelCase__ ( self : List[Any] , __snake_case : List[str] , __snake_case : str = "en_XX" , __snake_case : Optional[List[str]] = None , __snake_case : str = "ro_RO" , **__snake_case : List[Any] , ) -> BatchEncoding:
        __magic_name__: List[Any] = src_lang
        __magic_name__: List[Any] = tgt_lang
        return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )

    # Appears to be ``_switch_to_input_mode``.
    def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
        return self.set_src_lang_special_tokens(self.src_lang )

    # Appears to be ``_switch_to_target_mode``.
    def lowerCamelCase__ ( self : Any ) -> Tuple:
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    # Appears to be ``set_src_lang_special_tokens``: prefix = [src_lang_code],
    # suffix = [eos].
    def lowerCamelCase__ ( self : Any , __snake_case : str ) -> None:
        __magic_name__: Any = self.lang_code_to_id[src_lang]
        __magic_name__: str = [self.cur_lang_code_id]
        __magic_name__: Tuple = [self.eos_token_id]

    # Appears to be ``set_tgt_lang_special_tokens``: prefix = [tgt_lang_code],
    # suffix = [eos].
    def lowerCamelCase__ ( self : Tuple , __snake_case : str ) -> None:
        __magic_name__: int = self.lang_code_to_id[tgt_lang]
        __magic_name__: Dict = [self.cur_lang_code_id]
        __magic_name__: Optional[int] = [self.eos_token_id]
| 213 | 1 |
'''English letter-frequency constants for frequency-analysis helpers.'''

import string

# Bug fix: all three constants below were bound to the same throwaway name
# ``__a``, leaving ``ETAOIN`` and ``LETTERS`` (read by the functions in this
# module) undefined. Distinct names are restored here.
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    'E': 1_2.7_0,
    'T': 9.0_6,
    'A': 8.1_7,
    'O': 7.5_1,
    'I': 6.9_7,
    'N': 6.7_5,
    'S': 6.3_3,
    'H': 6.0_9,
    'R': 5.9_9,
    'D': 4.2_5,
    'L': 4.0_3,
    'C': 2.7_8,
    'U': 2.7_6,
    'M': 2.4_1,
    'W': 2.3_6,
    'F': 2.2_3,
    'G': 2.0_2,
    'Y': 1.9_7,
    'P': 1.9_3,
    'B': 1.2_9,
    'V': 0.9_8,
    'K': 0.7_7,
    'J': 0.1_5,
    'X': 0.1_5,
    'Q': 0.1_0,
    'Z': 0.0_7,
}
# English letters ordered from most to least frequent.
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
# The plain uppercase alphabet.
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def __UpperCAmelCase ( a_: str ):
_UpperCAmelCase : Any = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def __UpperCAmelCase ( a_: tuple ):
return x[0]
def __UpperCAmelCase ( a_: str ):
_UpperCAmelCase : Any = get_letter_count(a_ )
_UpperCAmelCase : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(a_ )
_UpperCAmelCase : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find, reverse=a_ )
_UpperCAmelCase : int = "".join(freq_to_letter[freq] )
_UpperCAmelCase : int = list(freq_to_letter_str.items() )
freq_pairs.sort(key=a_, reverse=a_ )
_UpperCAmelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(a_ )
def __UpperCAmelCase ( a_: str ):
    """Score (0-12) how closely the message's letter frequencies match English.

    NOTE(review): as written this raises NameError — ``get_frequency_order``
    is not defined in this module (every function here was rebound to a single
    shared name), and the locals ``freq_order``/``match_score`` are read but
    never assigned under those names. Restore the original identifiers before
    relying on this function.
    """
    _UpperCAmelCase : List[str] = get_frequency_order(a_ )
    _UpperCAmelCase : List[Any] = 0
    # One point for each of the six most-frequent English letters that appears
    # among the message's six most frequent letters...
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    # ...and likewise for the six least-frequent letters.
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod() | 494 | '''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__a = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
# Bug fix: the repo 'src' path was computed into ``__a`` but the undefined
# name ``git_repo_path`` was inserted into sys.path.
sys.path.insert(1, __a)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __UpperCAmelCase ( a_: str ):
    """pytest hook body: register transformers' shared command-line options.

    ``a_`` is pytest's option parser object.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(a_ )
def __UpperCAmelCase ( a_: str ):
    """pytest hook body: emit transformers' detailed test reports on request.

    ``a_`` is pytest's terminal-reporter plugin object.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    # Bug fixes: the original read the undefined name ``terminalreporter``
    # (the value was assigned to a throwaway name) and passed the reporter
    # itself as the report ``id`` instead of the ``--make-reports`` value.
    make_reports = a_.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(a_, id=make_reports )
def _A( UpperCamelCase__ : int = 50 ) -> int:
'''simple docstring'''
__lowercase = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
    # Bug fix: this module defines ``_A``; the previous ``solution()`` call
    # referenced an undefined name and raised NameError.
    print(F"""{_A() = }""")
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = BlipImageProcessor()
__lowercase = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
__lowercase = BlipaProcessor(lowerCamelCase__ , lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self : Optional[int] , **lowerCamelCase__ : Any ) -> str:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ ).tokenizer
def UpperCAmelCase_ ( self : Dict , **lowerCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ ).image_processor
def UpperCAmelCase_ ( self : List[str] ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(lowerCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowercase = self.get_image_processor(do_normalize=lowerCamelCase__ , padding_value=1.0 )
__lowercase = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(lowerCamelCase__ , return_tensors='''np''' )
__lowercase = processor(images=lowerCamelCase__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowercase = '''lower newer'''
__lowercase = processor(text=lowerCamelCase__ )
__lowercase = tokenizer(lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowercase = '''lower newer'''
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase = processor.batch_decode(lowerCamelCase__ )
__lowercase = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowercase = '''lower newer'''
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 362 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def a(job):
    """Extract start/end timestamps and the duration (in minutes) from one GitHub Actions job payload.

    Returns a dict with keys ``started_at``, ``completed_at`` and ``duration``.
    """
    job_info = {}

    start = job['''started_at''']
    end = job['''completed_at''']
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info['''started_at'''] = start
    job_info['''completed_at'''] = end
    job_info['''duration'''] = duration_in_min

    return job_info


# The rest of this script calls the helper by its descriptive name; keep ``a`` for backward compatibility.
extract_time_from_single_job = a
def a(workflow_run_id, token=None):
    """Return ``{job_name: timing info}`` for every job of a GitHub Actions workflow run.

    ``token`` (optional) is a GitHub API bearer token; the API is paginated 100 jobs at a time.
    Returns an empty dict on any fetch/parse error.
    """
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}

    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url , headers=headers ).json()
    job_time = {}

    try:
        job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        # First page already fetched 100 jobs; fetch the remaining pages.
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 100) / 100 )

        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"""&page={i + 2}""" , headers=headers ).json()
            job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )

        return job_time
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )

    return {}


# Name used by the CLI entry point below; keep ``a`` for backward compatibility.
get_job_time = a
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    # Sort longest-running jobs first.
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(F'''{k}: {v['duration']}''')
| 35 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def a(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a RemBERT TensorFlow checkpoint into a PyTorch state dict.

    The duplicate ``A__`` parameter names were a SyntaxError; restored distinct names.
    """
    # Initialise PyTorch model from the JSON config
    config = RemBertConfig.from_json_file(rembert_config_file )
    print('''Building PyTorch model from configuration: {}'''.format(str(config ) ) )
    model = RemBertModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print('''Save PyTorch model to {}'''.format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )


# Name used by the CLI entry point below; keep ``a`` for backward compatibility.
convert_rembert_tf_checkpoint_to_pytorch = a
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--rembert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained RemBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 35 | 1 |
from datetime import datetime
import requests
def _SCREAMING_SNAKE_CASE(url: str) -> bytes:
    """Resolve the direct media URL for an Instagram/IGTV post and return the raw video bytes."""
    base_url = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
    video_url = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
    # Fetch the resolved media URL — the original code re-fetched the page URL and ignored ``src``.
    return requests.get(video_url ).content


# Name used by the CLI entry point below.
download_video = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    # Timestamped output file name, e.g. 2024-01-01_12:00:00.mp4
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
    with open(file_name, 'wb') as fp:
        fp.write(download_video(url))
    print(f'''Done. Video saved to disk as {file_name}.''')
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazily-exposed public API: submodule name -> list of importable names.
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exported when torch is installed.
    _import_structure['modeling_wavlm'] = [
        'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'WavLMForAudioFrameClassification',
        'WavLMForCTC',
        'WavLMForSequenceClassification',
        'WavLMForXVector',
        'WavLMModel',
        'WavLMPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def lowerCAmelCase_(number: int) -> bool:
    """Return True if ``number`` is 0 or a power of two.

    Raises:
        ValueError: if ``number`` is negative.
    """
    # Parameter renamed to ``number`` — the body referenced it while the signature said ``lowerCamelCase``.
    if number < 0:
        raise ValueError("""number must not be negative""" )
    # A power of two has a single set bit, so n & (n - 1) clears it to zero.
    return number & (number - 1) == 0
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 21 |
def SCREAMING_SNAKE_CASE(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase when ``use_pascal`` is True).

    Raises:
        ValueError: if ``input_str`` is not a string or ``use_pascal`` is not a bool.
    """
    # Distinct parameter names restored — both were ``snake_case`` (a SyntaxError).
    if not isinstance(input_str, str):
        msg = F"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = F"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split('_')
    # camelCase keeps the first word lowercase; PascalCase capitalizes every word.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    from doctest import testmod

    testmod()
| 375 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = """."""

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
# Note: the function below references INTERNAL_OPS by name, so the constant must keep this name.
INTERNAL_OPS = [
    """Assert""",
    """AssignVariableOp""",
    """EmptyTensorList""",
    """MergeV2Checkpoints""",
    """ReadVariableOp""",
    """ResourceGather""",
    """RestoreV2""",
    """SaveV2""",
    """ShardedFilename""",
    """StatefulPartitionedCall""",
    """StaticRegexFullMatch""",
    """VarHandleOp""",
]
def _A(saved_model_path, strict, opset):
    """Check every op of a TF SavedModel is either ONNX-exportable at ``opset`` or internal.

    When ``strict`` is True, incompatible ops raise; otherwise they are printed.
    The duplicate ``__lowercase`` parameter names were a SyntaxError; restored distinct names.
    """
    lowerCamelCase__ = None  # placeholder removed; see restored locals below
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, """utils""", """tf_ops""", """onnx.json""")) as f:
        onnx_opsets = json.load(f)["""opsets"""]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, """rb""") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        # Join the list into the message — concatenating a str and a list is a TypeError.
        raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"""Found the following incompatible ops for the opset {opset}:""")
        print(*incompatible_ops, sep="""\n""")
    else:
        print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""")


# Name used by the CLI entry point below.
onnx_compliancy = _A
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
    parser.add_argument(
        """--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
    )
    parser.add_argument(
        """--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
    )
    parser.add_argument(
        """--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__magic_name__ = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 258 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class A_ ( unittest.TestCase ):
    """Tests for ``CLIPSegProcessor`` (CLIP tokenizer + ViT image processor composition)."""

    def setUp(self):
        # Build a tiny BPE vocab and an image-processor config on disk; every test reloads them.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image built from a (3, 30, 400) uint8 array."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """Kwargs passed to ``from_pretrained`` must override the saved component configs."""
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        """Images plus a visual prompt must yield pixel_values and conditional_pixel_values."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'conditional_pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters the ``DonutImageProcessor`` tests run with.

    Renamed from the colliding ``lowerCamelCase_`` to the name the test class instantiates.
    The duplicate ``snake_case_`` parameter names were a SyntaxError; restored distinct names.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default target size when none is given.
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build a ``DonutImageProcessor``."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``DonutImageProcessor`` on PIL, numpy and torch inputs.

    Renamed from ``lowerCamelCase_`` which shadowed the tester class of the same name;
    the undefined base ``lowerCAmelCase__`` is the imported ``ImageProcessingSavingTestMixin``.
    """

    # ``None`` when vision deps are missing; the mixin skips tests in that case.
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})

    def test_batch_feature(self):
        # Covered by the per-input-type tests below.
        pass

    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
| 639 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL (the second ``__UpperCamelCase`` assignment clobbered the logger).
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
    """facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
    """facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
    """facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
    """facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
    """facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
    """facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
    """facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
    """facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    """Configuration for X-MOD models (XLM-R style transformer with language adapters).

    The duplicate ``lowerCamelCase`` parameter names were a SyntaxError and the attribute
    assignments all went to a throwaway local; restored the real names. Base class is the
    imported ``PretrainedConfig`` (the previous base ``_lowerCAmelCase`` was undefined).
    """

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # Adapter-specific settings.
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    """ONNX export configuration for X-MOD (renamed from the duplicate ``__SCREAMING_SNAKE_CASE``)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axis spec for ONNX export, depending on the task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL (the second ``__UpperCamelCase`` assignment clobbered the logger).
VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/vivit-b-16x2-kinetics400""": (
        """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for ViViT video transformer models.

    The duplicate ``lowerCamelCase`` parameter names were a SyntaxError and the attribute
    assignments all went to a throwaway local; restored the real names. Base class is the
    imported ``PretrainedConfig`` (the previous base ``_lowerCAmelCase`` was undefined).
    """

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Video-specific settings: spatial size, clip length and 3-D patch (tubelet) size.
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
import baseaa
def snake_case__ ( lowercase ):
return baseaa.baaencode(string.encode("utf-8" ) )
def snake_case__ ( lowercase ):
return baseaa.baadecode(lowercase ).decode("utf-8" )
if __name__ == "__main__":
    test = """Hello World!"""
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)

from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _lowercase ( UpperCAmelCase__ ):
    """Output container for the 1-D UNet below: wraps the denoised sample tensor."""
    # presumably shape (batch_size, channels, sample_length) — TODO confirm against the model's forward
    SCREAMING_SNAKE_CASE: torch.FloatTensor
class _lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ):
    """A 1-D UNet: maps a (sample, timestep) pair to a denoised sample.

    Structure (down blocks -> optional mid block -> up blocks -> optional out
    block) mirrors diffusers' ``UNet1DModel`` — presumably; confirm upstream.

    NOTE(review): this block was damaged by an identifier-obfuscation pass —
    every parameter shares one name (``lowerCamelCase__``, a SyntaxError) and
    the ``lowerCAmelCase_:`` assignments bind throwaway locals where the
    original bound attributes on ``self``.  Restore real identifiers before
    attempting to run this code.
    """

    @register_to_config
    def __init__( self , lowerCamelCase__ = 65_536 , lowerCamelCase__ = None , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 0 , lowerCamelCase__ = "fourier" , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = 0.0 , lowerCamelCase__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase__ = "UNetMidBlock1D" , lowerCamelCase__ = None , lowerCamelCase__ = (32, 32, 64) , lowerCamelCase__ = None , lowerCamelCase__ = 8 , lowerCamelCase__ = 1 , lowerCamelCase__ = False , ):
        """Assemble time projection/embedding and the down/mid/up/out blocks.

        Positional parameters correspond (by position, judging from the body
        and the defaults) to: sample_size, sample_rate, in_channels,
        out_channels, extra_in_channels, time_embedding_type, flip_sin_to_cos,
        use_timestep_embedding, freq_shift, down_block_types, up_block_types,
        mid_block_type, out_block_type, block_out_channels, act_fn,
        norm_num_groups, layers_per_block, downsample_each_block —
        TODO confirm against the un-obfuscated original.
        """
        super().__init__()
        lowerCAmelCase_: Optional[Any] = sample_size

        # time: either a Gaussian Fourier projection or sinusoidal positions.
        if time_embedding_type == "fourier":
            lowerCAmelCase_: Dict = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=lowerCamelCase__ , log=lowerCamelCase__ , flip_sin_to_cos=lowerCamelCase__ )
            lowerCAmelCase_: List[str] = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            lowerCAmelCase_: Tuple = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=lowerCamelCase__ , downscale_freq_shift=lowerCamelCase__ )
            lowerCAmelCase_: Dict = block_out_channels[0]

        if use_timestep_embedding:
            lowerCAmelCase_: Tuple = block_out_channels[0] * 4
            lowerCAmelCase_: Any = TimestepEmbedding(
                in_channels=lowerCamelCase__ , time_embed_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ , out_dim=block_out_channels[0] , )

        lowerCAmelCase_: str = nn.ModuleList([] )
        lowerCAmelCase_: Dict = None
        lowerCAmelCase_: Optional[Any] = nn.ModuleList([] )
        lowerCAmelCase_: int = None

        # down
        lowerCAmelCase_: List[str] = in_channels
        for i, down_block_type in enumerate(lowerCamelCase__ ):
            lowerCAmelCase_: Optional[int] = output_channel
            lowerCAmelCase_: Optional[Any] = block_out_channels[i]

            # The first down block also receives the extra conditioning channels.
            if i == 0:
                input_channel += extra_in_channels

            lowerCAmelCase_: List[str] = i == len(lowerCamelCase__ ) - 1

            lowerCAmelCase_: List[str] = get_down_block(
                lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(lowerCamelCase__ )

        # mid
        lowerCAmelCase_: Optional[int] = get_mid_block(
            lowerCamelCase__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase__ , add_downsample=lowerCamelCase__ , )

        # up: walk the channel list in reverse to mirror the down path.
        lowerCAmelCase_: Dict = list(reversed(lowerCamelCase__ ) )
        lowerCAmelCase_: Any = reversed_block_out_channels[0]
        if out_block_type is None:
            lowerCAmelCase_: str = out_channels
        else:
            lowerCAmelCase_: Any = block_out_channels[0]

        for i, up_block_type in enumerate(lowerCamelCase__ ):
            lowerCAmelCase_: Dict = output_channel
            lowerCAmelCase_: int = (
                reversed_block_out_channels[i + 1] if i < len(lowerCamelCase__ ) - 1 else final_upsample_channels
            )

            lowerCAmelCase_: Optional[int] = i == len(lowerCamelCase__ ) - 1

            lowerCAmelCase_: Union[str, Any] = get_up_block(
                lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(lowerCamelCase__ )
            lowerCAmelCase_: str = output_channel

        # out
        lowerCAmelCase_: List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        lowerCAmelCase_: int = get_out_block(
            out_block_type=lowerCamelCase__ , num_groups_out=lowerCamelCase__ , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase__ , act_fn=lowerCamelCase__ , fc_dim=block_out_channels[-1] // 4 , )

    def _a ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = True , ):
        """Forward pass: embed the timestep, then down -> mid -> up -> out.

        Parameters are (by position) sample, timestep, return_dict —
        presumably; confirm against the un-obfuscated original.
        """
        lowerCAmelCase_: Any = timestep
        # 1. time: promote a scalar timestep to a tensor on the sample's device.
        if not torch.is_tensor(lowerCamelCase__ ):
            lowerCAmelCase_: Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
            lowerCAmelCase_: List[Any] = timesteps[None].to(sample.device )

        lowerCAmelCase_: Union[str, Any] = self.time_proj(lowerCamelCase__ )
        if self.config.use_timestep_embedding:
            lowerCAmelCase_: Any = self.time_mlp(lowerCamelCase__ )
        else:
            # No MLP: broadcast the raw projection across the length dimension.
            lowerCAmelCase_: Any = timestep_embed[..., None]
            lowerCAmelCase_: str = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            lowerCAmelCase_: Dict = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )

        # 2. down: accumulate residuals for the skip connections.
        lowerCAmelCase_: Dict = ()
        for downsample_block in self.down_blocks:
            lowerCAmelCase_ , lowerCAmelCase_: Optional[int] = downsample_block(hidden_states=lowerCamelCase__ , temb=lowerCamelCase__ )
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            lowerCAmelCase_: int = self.mid_block(lowerCamelCase__ , lowerCamelCase__ )

        # 4. up: consume the residual stack from the top.
        for i, upsample_block in enumerate(self.up_blocks ):
            lowerCAmelCase_: Any = down_block_res_samples[-1:]
            lowerCAmelCase_: str = down_block_res_samples[:-1]
            lowerCAmelCase_: List[str] = upsample_block(lowerCamelCase__ , res_hidden_states_tuple=lowerCamelCase__ , temb=lowerCamelCase__ )

        # 5. post-process
        if self.out_block:
            lowerCAmelCase_: Any = self.out_block(lowerCamelCase__ , lowerCamelCase__ )

        if not return_dict:
            return (sample,)

        return UNetaDOutput(sample=lowerCamelCase__ )
"""Polynomial regression demo: fit salary vs. position level and plot the fit.

The obfuscated original rebound every module-level name to ``__lowercase``,
leaving ``y``, ``pol_reg``, ``poly_reg`` and ``viz_polymonial`` undefined;
names are restored from the expressions that consume them.
"""

import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial() -> None:
    """Scatter the raw data and overlay the degree-4 polynomial fit."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all prime numbers in [2, num] using the Sieve of Eratosthenes.

    Raises:
        ValueError: if ``num`` is not a positive integer.
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    # primes[i] stays True while i is still a prime candidate.
    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p, starting at p*p, as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


# Backward-compatible alias for the obfuscated original's function name.
lowercase = prime_sieve_eratosthenes

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The original guard bound the input to one name and printed another;
    # both now agree.
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 135 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
# Module-level logger for this configuration module.
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

# Pretrained BlenderbotSmall checkpoint id -> URL of its config.json.
# NOTE(review): both constants rebind the same mangled name; the second was
# presumably BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP — confirm.
SCREAMING_SNAKE_CASE = {
    'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class UpperCAmelCase_ ( __A ):
    """Configuration for a BlenderbotSmall encoder-decoder model.

    The obfuscated original repeated one parameter name for every constructor
    argument (a SyntaxError) and rebound one class-attribute name three times;
    names below are restored from the attribute assignments in the body and
    the defaults' positions.
    """

    model_type = '''blenderbot-small'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=5_0265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ) -> None:
        """Store the hyper-parameters and forward special-token ids upward."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class UpperCAmelCase_ ( __A ):
    """ONNX export configuration for BlenderbotSmall (default / seq2seq-lm /
    causal-lm tasks), in the style of the BART seq2seq ONNX configs.

    NOTE(review): obfuscation damage — several method signatures repeat the
    name ``UpperCAmelCase`` (a SyntaxError), all methods share the name
    ``A__`` (later defs shadow earlier ones), and the ``lowercase : ...``
    assignments bind throwaway locals while bodies read the original variable
    names (batch, seq_length, common_inputs, ...).  Restore real identifiers
    before use.
    """

    @property
    def A__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
        """Declare the dynamic input axes for the configured export task."""
        if self.task in ["default", "seq2seq-lm"]:
            lowercase : Optional[Any] =OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )

            if self.use_past:
                # With a KV cache the decoder consumes one new token at a time.
                lowercase : str ={0: '''batch'''}
                lowercase : List[str] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                lowercase : List[str] ={0: '''batch''', 1: '''decoder_sequence'''}
                lowercase : Optional[int] ={0: '''batch''', 1: '''decoder_sequence'''}

            if self.use_past:
                self.fill_with_past_key_values_(UpperCAmelCase , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            lowercase : Any =OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                lowercase , lowercase : Union[str, Any] =self.num_layers
                for i in range(UpperCAmelCase ):
                    lowercase : Optional[int] ={0: '''batch''', 2: '''past_sequence + sequence'''}
                    lowercase : str ={0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            lowercase : int =OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )

        return common_inputs

    @property
    def A__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
        """Declare the dynamic output axes, extending past-key-value axes when cached."""
        if self.task in ["default", "seq2seq-lm"]:
            lowercase : Tuple =super().outputs
        else:
            lowercase : Any =super(UpperCAmelCase , self ).outputs
            if self.use_past:
                lowercase , lowercase : Any =self.num_layers
                for i in range(UpperCAmelCase ):
                    lowercase : Any ={0: '''batch''', 2: '''past_sequence + sequence'''}
                    lowercase : str ={0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs

    def A__ ( self : Optional[Any] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs, plus zeroed past_key_values when enabled."""
        lowercase : str =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )

        # Generate decoder inputs
        lowercase : str =seq_length if not self.use_past else 1
        lowercase : str =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        lowercase : Union[str, Any] ={f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        lowercase : str =dict(**UpperCAmelCase , **UpperCAmelCase )

        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            lowercase , lowercase : int =common_inputs['''input_ids'''].shape
            lowercase : str =common_inputs['''decoder_input_ids'''].shape[1]
            lowercase , lowercase : List[Any] =self.num_attention_heads
            lowercase : Union[str, Any] =(
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            lowercase : Dict =decoder_seq_length + 3
            lowercase : List[str] =(
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # Extend the decoder mask to cover the (longer) cached sequence.
            lowercase : Dict =torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(UpperCAmelCase , UpperCAmelCase )] , dim=1 )
            lowercase : List[Any] =[]
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            lowercase , lowercase : Union[str, Any] =self.num_layers
            lowercase : Optional[int] =min(UpperCAmelCase , UpperCAmelCase )
            lowercase : List[Any] =max(UpperCAmelCase , UpperCAmelCase ) - min_num_layers
            lowercase : str ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''

            for _ in range(UpperCAmelCase ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(UpperCAmelCase ),
                        torch.zeros(UpperCAmelCase ),
                        torch.zeros(UpperCAmelCase ),
                        torch.zeros(UpperCAmelCase ),
                    ) )
            # TODO: test this.
            lowercase : List[Any] =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(UpperCAmelCase , UpperCAmelCase ):
                common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) )
        return common_inputs

    def A__ ( self : int , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy decoder-only inputs, plus zeroed past_key_values when enabled."""
        lowercase : List[Any] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )

        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            lowercase , lowercase : List[Any] =common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            lowercase : Any =seqlen + 2
            lowercase , lowercase : Optional[int] =self.num_layers
            lowercase , lowercase : Union[str, Any] =self.num_attention_heads
            lowercase : List[str] =(
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            lowercase : int =common_inputs['''attention_mask'''].dtype
            lowercase : Tuple =torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(UpperCAmelCase , UpperCAmelCase , dtype=UpperCAmelCase )] , dim=1 )
            lowercase : Dict =[
                (torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) for _ in range(UpperCAmelCase )
            ]
        return common_inputs

    def A__ ( self : Union[str, Any] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy inputs by tokenizing a fixed string of unk tokens."""
        # Force a fixed batch dimension when the requested axis is dynamic (-1).
        lowercase : Tuple =compute_effective_axis_dimension(
            UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        lowercase : int =tokenizer.num_special_tokens_to_add(UpperCAmelCase )
        lowercase : Optional[int] =compute_effective_axis_dimension(
            UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase )

        # Generate dummy inputs according to compute batch and sequence
        lowercase : List[str] =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        lowercase : Dict =dict(tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase ) )
        return common_inputs

    def A__ ( self : Optional[Any] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation based on the configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            lowercase : Any =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )

        elif self.task == "causal-lm":
            lowercase : Dict =self._generate_dummy_inputs_for_causal_lm(
                UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
        else:
            lowercase : Union[str, Any] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )

        return common_inputs

    def A__ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict ) -> str:
        """Flatten nested past_key_values, choosing the seq2seq or causal layout."""
        if self.task in ["default", "seq2seq-lm"]:
            lowercase : Any =super()._flatten_past_key_values_(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        else:
            lowercase : Optional[int] =super(UpperCAmelCase , self )._flatten_past_key_values_(
                UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
| 94 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger for this tokenizer module.
lowercase : List[str] = logging.get_logger(__name__)

# NOTE(review): all three constants rebind one mangled name; originally they
# were the logger, VOCAB_FILES_NAMES and PRETRAINED_VOCAB_FILES_MAP (the class
# below references those names) — confirm and restore.
lowercase : Optional[int] = {"tokenizer_file": "tokenizer.json"}

# Pretrained BLOOM checkpoint id -> URL of its serialized tokenizer.json.
lowercase : Any = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
    """Fast (tokenizers-backed) tokenizer for BLOOM checkpoints.

    NOTE(review): obfuscation damage — every method signature repeats one
    parameter name (a SyntaxError) and bodies read the original argument
    names (add_prefix_space, is_split_into_words, ...); the class attributes
    were presumably VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
    model_input_names / slow_tokenizer_class.  Restore before use.
    """

    lowercase : Optional[int] = VOCAB_FILES_NAMES
    lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    lowercase : Dict = ['input_ids', 'attention_mask']
    lowercase : Optional[int] = None

    def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="<unk>" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<pad>" , __UpperCamelCase=False , __UpperCamelCase=False , **__UpperCamelCase , ) -> Optional[int]:
        """Initialise the fast tokenizer, then sync the backend pre-tokenizer's
        add_prefix_space flag with the requested setting."""
        super().__init__(
            __UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , unk_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , pad_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , **__UpperCamelCase , )
        # Rebuild the backend pre-tokenizer only if its stored flag disagrees.
        __UpperCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
            __UpperCamelCase : Optional[int] = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
            __UpperCamelCase : int = add_prefix_space
            __UpperCamelCase : List[Any] = pre_tok_class(**__UpperCamelCase )
        __UpperCamelCase : List[Any] = add_prefix_space

    def __lowerCamelCase ( self , *__UpperCamelCase , **__UpperCamelCase ) -> BatchEncoding:
        """Batch-encode; reject pretokenized input unless add_prefix_space=True."""
        __UpperCamelCase : int = kwargs.get("is_split_into_words" , __UpperCamelCase )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                " pretokenized inputs." )

        return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )

    def __lowerCamelCase ( self , *__UpperCamelCase , **__UpperCamelCase ) -> BatchEncoding:
        """Encode a single example; same pretokenized-input guard as above."""
        __UpperCamelCase : Any = kwargs.get("is_split_into_words" , __UpperCamelCase )

        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                " pretokenized inputs." )

        return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
        """Serialize the backend tokenizer model files to a directory."""
        __UpperCamelCase : List[str] = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
        return tuple(__UpperCamelCase )

    def __lowerCamelCase ( self , __UpperCamelCase ) -> List[int]:
        """Flatten a Conversation into input ids, appending EOS after each turn
        and truncating to the most recent model_max_length tokens."""
        __UpperCamelCase : List[Any] = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] )

        if len(__UpperCamelCase ) > self.model_max_length:
            __UpperCamelCase : int = input_ids[-self.model_max_length :]
        return input_ids
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# Version gates: the conversion relies on fairseq APIs present only in
# fairseq >= 0.12.2 and < v2.
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
    raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
    raise Exception('''requires fairseq < v2''')

logging.set_verbosity_info()
# NOTE(review): the three module constants below rebind one mangled name;
# originally: logger, SAMPLE_TEXT ('Hello, World!') and SAMPLE_LANGUAGE
# ('en_XX').  The conversion function references SAMPLE_LANGUAGE, which is
# otherwise undefined — restore these names.
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = '''Hello, World!'''
__lowerCAmelCase = '''en_XX'''
def snake_case_ ( snake_case , snake_case , snake_case ) -> Optional[int]:
    """Port a fairseq X-MOD checkpoint into a HF Xmod model and save it.

    Parameters were originally (xmod_checkpoint_path, pytorch_dump_folder_path,
    classification_head).  NOTE(review): obfuscation collapsed all three to
    one name (a SyntaxError), and the ``lowercase__: ...`` assignments bind
    throwaway locals while the body reads the original names (xmod, config,
    model, classification_head, ...).  Restore identifiers before running.
    """
    lowercase__: List[Any] = Path('data_bin' )
    lowercase__: Union[str, Any] = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(snake_case ).parent ) , checkpoint_file=Path(snake_case ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(snake_case ) , bpe='sentencepiece' , sentencepiece_model=str(Path(snake_case ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
    xmod.eval() # disable dropout
    print(snake_case )
    lowercase__: Dict = xmod.model.encoder.sentence_encoder
    # Build the HF config from the fairseq model's hyper-parameters.
    lowercase__: int = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        lowercase__: Any = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]

    print('Our X-MOD config:' , snake_case )

    lowercase__: Tuple = XmodForSequenceClassification(snake_case ) if classification_head else XmodForMaskedLM(snake_case )
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    lowercase__: Tuple = xmod_sent_encoder.embed_tokens.weight
    lowercase__: Tuple = xmod_sent_encoder.embed_positions.weight
    lowercase__: Any = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c xmod doesn't use them.

    lowercase__: List[Any] = xmod_sent_encoder.layernorm_embedding.weight
    lowercase__: int = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        lowercase__: Dict = model.roberta.encoder.layer[i]
        lowercase__: List[Any] = xmod_sent_encoder.layers[i]

        # self attention
        lowercase__: int = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError('Dimensions of self-attention weights do not match.' )

        lowercase__: str = xmod_layer.self_attn.q_proj.weight
        lowercase__: Dict = xmod_layer.self_attn.q_proj.bias
        lowercase__: Optional[int] = xmod_layer.self_attn.k_proj.weight
        lowercase__: str = xmod_layer.self_attn.k_proj.bias
        lowercase__: Tuple = xmod_layer.self_attn.v_proj.weight
        lowercase__: Dict = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        lowercase__: int = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('Dimensions of self-attention output weights do not match.' )
        lowercase__: List[Any] = xmod_layer.self_attn.out_proj.weight
        lowercase__: List[str] = xmod_layer.self_attn.out_proj.bias
        lowercase__: int = xmod_layer.self_attn_layer_norm.weight
        lowercase__: Optional[Any] = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        lowercase__: Union[str, Any] = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('Dimensions of intermediate weights do not match.' )
        lowercase__: List[Any] = xmod_layer.fca.weight
        lowercase__: str = xmod_layer.fca.bias

        # output
        lowercase__: Tuple = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('Dimensions of feed-forward weights do not match.' )
        lowercase__: str = xmod_layer.fca.weight
        lowercase__: Tuple = xmod_layer.fca.bias
        lowercase__: List[str] = xmod_layer.final_layer_norm.weight
        lowercase__: Union[str, Any] = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            lowercase__: Optional[int] = xmod_layer.adapter_layer_norm.weight
            lowercase__: Dict = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError('Lists of language adapters do not match.' )

        # Copy each per-language adapter's two linear layers.
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            lowercase__: List[Any] = bert_output.adapter_modules[lang_code]
            lowercase__: List[str] = xmod_layer.adapter_modules[lang_code]
            lowercase__: Optional[int] = from_adapter.fca.weight
            lowercase__: Optional[int] = from_adapter.fca.bias
            lowercase__: Optional[int] = from_adapter.fca.weight
            lowercase__: List[Any] = from_adapter.fca.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        lowercase__: Union[str, Any] = xmod_sent_encoder.layer_norm.weight
        lowercase__: Optional[Any] = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        lowercase__: Dict = xmod.model.classification_heads['mnli'].dense.weight
        lowercase__: Any = xmod.model.classification_heads['mnli'].dense.bias
        lowercase__: int = xmod.model.classification_heads['mnli'].out_proj.weight
        lowercase__: Union[str, Any] = xmod.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        lowercase__: Optional[Any] = xmod.model.encoder.lm_head.dense.weight
        lowercase__: List[str] = xmod.model.encoder.lm_head.dense.bias
        lowercase__: Dict = xmod.model.encoder.lm_head.layer_norm.weight
        lowercase__: Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
        lowercase__: Tuple = xmod.model.encoder.lm_head.weight
        lowercase__: List[str] = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    lowercase__: Dict = xmod.encode(snake_case ).unsqueeze(0 )  # batch of size 1
    model.roberta.set_default_language(snake_case )

    lowercase__: Tuple = model(snake_case )[0]
    if classification_head:
        lowercase__: Dict = xmod.model.classification_heads['mnli'](xmod.extract_features(snake_case ) )
    else:
        lowercase__: Dict = xmod.model(snake_case , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    lowercase__: List[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'max_absolute_diff = {max_absolute_diff}' )  # ~ 1e-7
    lowercase__: int = torch.allclose(snake_case , snake_case , atol=1e-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )

    Path(snake_case ).mkdir(parents=snake_case , exist_ok=snake_case )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(snake_case )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to a mangled name, so the
    # ``parser.add_argument`` calls, ``args`` and the function name
    # ``convert_xmod_checkpoint_to_pytorch`` are all undefined here —
    # originally this was ``parser = argparse.ArgumentParser()`` and the
    # conversion function defined above.  Restore names before running.
    __lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    __lowerCAmelCase = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 335 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
# Module-level logger for this deprecation shim.
__lowerCAmelCase = logging.get_logger(__name__)


class __a ( __UpperCamelCase ):
    """Deprecated alias of ``PerceiverImageProcessor``; removal in Transformers v5.

    The obfuscated original gave ``*args`` and ``**kwargs`` the same name
    (a SyntaxError) and passed an undefined name where the warning category
    (``FutureWarning``) belongs; both are restored here.
    """

    def __init__( self , *args , **kwargs ) -> None:
        """Emit the deprecation warning, then defer to the image-processor base."""
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.' ,
            FutureWarning ,
        )
        super().__init__(*args , **kwargs )
| 335 | 1 |
'''simple docstring'''
snake_case_ : Union[str, Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def A__ ( ):
_UpperCamelCase : str = input('Enter message: ' )
_UpperCamelCase : Union[str, Any] = input('Enter key [alphanumeric]: ' )
_UpperCamelCase : int = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
_UpperCamelCase : Tuple = 'encrypt'
_UpperCamelCase : Union[str, Any] = encrypt_message(UpperCAmelCase_ , UpperCAmelCase_ )
elif mode.lower().startswith('d' ):
_UpperCamelCase : List[Any] = 'decrypt'
_UpperCamelCase : Tuple = decrypt_message(UpperCAmelCase_ , UpperCAmelCase_ )
print(f'\n{mode.title()}ed message:' )
print(UpperCAmelCase_ )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
return translate_message(UpperCAmelCase_ , UpperCAmelCase_ , 'encrypt' )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
return translate_message(UpperCAmelCase_ , UpperCAmelCase_ , 'decrypt' )
def A__(key, message, mode):
    """Shift each letter of ``message`` by the matching ``key`` letter.

    ``mode`` selects the direction: ``"encrypt"`` adds the key letter's
    alphabet index, ``"decrypt"`` subtracts it. Non-letter symbols are copied
    through unchanged and do not advance the key position.

    Fixes relative to the original block: all three parameters shared one
    duplicated placeholder name (a SyntaxError) and the wrap-around step took
    ``len()`` of an ambiguous variable where the alphabet length is required.
    """
    # Same 26-letter table as the module-level LETTERS constant; imported
    # locally so the routine is self-contained.
    from string import ascii_uppercase as letters

    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = letters.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += letters.find(key[key_index])
            elif mode == "decrypt":
                num -= letters.find(key[key_index])
            # Wrap around the alphabet in either direction.
            num %= len(letters)
            if symbol.isupper():
                translated.append(letters[num])
            elif symbol.islower():
                translated.append(letters[num].lower())
            # Advance through the key, cycling back to its start.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letters pass through without consuming a key letter.
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this module as written — the
    # interactive entry point above is bound to a different name; confirm.
    main()
| 195 |
'''simple docstring'''
from __future__ import annotations
def A__(position, n):
    """Return the knight moves from ``position`` that stay on an ``n`` x ``n`` board.

    Fix: both parameters previously shared one duplicated placeholder name
    (a SyntaxError).
    """
    y, x = position
    # All eight L-shaped knight offsets from (y, x).
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions
def A__(UpperCAmelCase_):
    """Return True when every square of the board is non-zero (fully visited)."""
    return all(cell != 0 for row in UpperCAmelCase_ for cell in row)
def A__(board, pos, curr):
    """Recursively try to extend an open knight's tour (backtracking).

    ``board`` holds visit-order numbers (0 = unvisited), ``pos`` is the square
    just visited and ``curr`` is its visit number. Returns True once the board
    is completely filled.

    Fixes relative to the original block: the three parameters shared one
    duplicated placeholder name (a SyntaxError) and the recursive call used a
    name this function is not bound to; it now recurses through its own name.
    NOTE(review): ``is_complete`` / ``get_valid_pos`` are expected to be the
    sibling helpers above — their defs are bound to other names in this module
    as written; confirm.
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if A__(board, position, curr + 1):
                return True
            # Undo the move before trying the next candidate (backtrack).
            board[y][x] = 0
    return False
def A__(n):
    """Return an ``n`` x ``n`` board filled with an open knight's tour.

    Tries every starting square in turn; raises ValueError when no tour
    exists for this board size.

    Fix: the error message misspelled "Knight".
    NOTE(review): ``open_knight_tour_helper`` is expected to be the
    backtracking helper above, whose def is bound to another name in this
    module as written; confirm.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            # This start failed: clear it and try the next square.
            board[i][j] = 0
    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)
if __name__ == "__main__":
    # Run this module's doctests when it is executed as a script.
    import doctest
    doctest.testmod()
| 195 | 1 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
UpperCAmelCase : List[Any] = _symbol_database.Default()
UpperCAmelCase : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# 
\x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 
\x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
# Fix: the builder calls below read ``_globals``, but the original bound
# ``globals()`` to a throwaway name, leaving ``_globals`` undefined.
# NOTE(review): they also read ``DESCRIPTOR``, which the serialized-file
# handle above is not bound to as written — confirm the upstream assignment.
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
# Pure-Python descriptor fallback: when the C descriptor implementation is not
# in use, the generated module records serialized byte offsets of each message
# in the file blob above.
# NOTE(review): these offsets are bound to one reused placeholder name here;
# in compiler output they would be per-message ``_serialized_start``/``_end``
# assignments — confirm against the generating .proto.
if _descriptor._USE_C_DESCRIPTORS is False:
    UpperCAmelCase : Union[str, Any] = None
    UpperCAmelCase : Optional[Any] = b'H\003'
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    UpperCAmelCase : str = 4_5
    UpperCAmelCase : Optional[Any] = 1_5_8_1
    UpperCAmelCase : List[str] = 1_5_1_7
    UpperCAmelCase : Dict = 1_5_7_0
    UpperCAmelCase : List[str] = 1_5_8_4
    UpperCAmelCase : Any = 1_7_9_3
    UpperCAmelCase : Dict = 1_7_9_5
    UpperCAmelCase : Tuple = 1_9_1_6
    UpperCAmelCase : List[Any] = 1_8_6_4
    UpperCAmelCase : Tuple = 1_9_0_5
    UpperCAmelCase : Optional[int] = 1_9_1_9
    UpperCAmelCase : Optional[int] = 2_4_2_9
    UpperCAmelCase : Any = 2_2_0_8
    UpperCAmelCase : int = 2_4_1_8
    UpperCAmelCase : Union[str, Any] = 2_3_2_3
    UpperCAmelCase : Any = 2_4_0_7
# @@protoc_insertion_point(module_scope)
| 47 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
UpperCAmelCase : List[str] = logging.get_logger(__name__)
# Map of canonical SEW-D checkpoint names to their hosted config files.
# NOTE(review): this rebinds the same placeholder name as the logger above —
# in upstream code these are two distinct module constants; confirm.
UpperCAmelCase : Dict = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase(PretrainedConfig):
    """Configuration for SEW-D models.

    Stores the hyper-parameters used to instantiate a SEW-D acoustic model:
    the convolutional feature extractor, the DeBERTa-style disentangled
    attention encoder, SpecAugment masking, and the CTC /
    sequence-classification heads.

    Fixes relative to the original block: every ``__init__`` parameter was
    declared with one duplicated placeholder name (a SyntaxError), the
    hyper-parameters were bound to a reused local instead of ``self``
    attributes (so the validation reading ``self.conv_*`` could never work),
    and the base class referenced an unbound name where the imported
    ``PretrainedConfig`` was clearly intended.
    """

    # Model-type identifier string used by the config machinery.
    _lowercase = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Copy the conv specs so later mutation of the caller's sequences
        # cannot change the config.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect.'
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
                f'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)'
                f'= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.'
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def UpperCAmelCase_(self):
        """Product of all convolutional strides (overall time-axis reduction)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 47 | 1 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    """Builds small random YOLOS configs and inputs for the unit tests below.

    Fixes relative to the original block: the class was bound to a name that
    is immediately shadowed by the next class (its only visible caller refers
    to ``YolosModelTester``), ``__init__`` declared every parameter with one
    duplicated placeholder name (a SyntaxError), all results were stored in a
    reused local instead of ``self`` attributes, every method shared one name
    (shadowing all but the last), and the device argument used an unbound
    placeholder where the imported ``torch_device`` was clearly intended.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        """Create random pixel values and (optionally) detection targets."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            # NOTE(review): the target keys were destroyed by the placeholder
            # renaming; "class_labels"/"boxes" is the detection-loss contract.
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Return a tiny YolosConfig matching this tester's hyper-parameters."""
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward the base model and check the hidden-state shape."""
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        """Forward the detection head with and without labels; check shapes."""
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common-test mixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class snake_case(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Unit tests for YOLOS (base model + object-detection head).

    Fixes relative to the original block: the mixin bases referenced an
    unbound placeholder where the imported ``ModelTesterMixin`` /
    ``PipelineTesterMixin`` were clearly intended; every method was bound to
    the same name (shadowing all but the last) where the ``unittest``/mixin
    hook names are required; several helpers declared duplicated parameter
    names (a SyntaxError); and the device argument used an unbound
    placeholder where ``torch_device`` was intended.
    """

    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    # NOTE(review): these four False flags were bound to throwaway names; the
    # names below follow the ModelTesterMixin contract — confirm.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Attach YOLOS-style detection targets when labels are requested."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels
        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # Intentionally a no-op override: YOLOS consumes pixel values only,
        # so the mixin's inputs_embeds test does not apply.
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len]
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len]
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size]
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def UpperCAmelCase ()-> Dict:
    """Load the fixture COCO image used by the integration tests below."""
    fixture_path = './tests/fixtures/tests_samples/COCO/000000039769.png'
    return Image.open(fixture_path)
@require_torch
@require_vision
class snake_case(unittest.TestCase):
    """Slow integration test against the public ``hustvl/yolos-small`` checkpoint.

    Fixes relative to the original block: several locals and the device
    argument used unbound placeholders where the imported ``torch_device``
    was clearly intended, and the cached property was bound to a placeholder
    name while the test body reads ``self.default_image_processor``.
    """

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        # NOTE(review): ``prepare_img`` is expected to be the fixture loader
        # defined above, whose def is bound to another name in this module as
        # written; confirm.
        model = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)
        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)
        self.assertEqual(len(results['scores']), 5)
        self.assertTrue(torch.allclose(results['scores'], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results['labels'].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results['boxes'][0, :], expected_slice_boxes))
| 393 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class snake_case :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : List[Any]=99 , lowerCAmelCase_ : List[Any]=32 , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : List[Any]=37 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Any=512 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]="None" , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : Optional[int]=None , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_input_mask
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = num_choices
SCREAMING_SNAKE_CASE_ = relative_attention
SCREAMING_SNAKE_CASE_ = position_biased_input
SCREAMING_SNAKE_CASE_ = pos_att_type
SCREAMING_SNAKE_CASE_ = scope
def _lowercase ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TFDebertaVaModel(config=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE_ = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
    """Build a TFDebertaVaForMaskedLM head and check the logits shape.

    Fixes duplicate parameter names and unbound ``model``/``result`` locals
    from the mangled original; parameter order matches
    prepare_config_and_inputs().
    """
    model = TFDebertaVaForMaskedLM(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    # MLM logits are one score per vocabulary entry at every position.
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def _lowercase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
    """Build a TFDebertaVaForSequenceClassification head and check the logits shape.

    Fixes the mangled original: ``self.num_labels`` was assigned to a dead
    name instead of the config, and ``model``/``result`` were never bound.
    """
    config.num_labels = self.num_labels
    model = TFDebertaVaForSequenceClassification(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    # One logit per class for the whole sequence.
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def _lowercase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
    """Build a TFDebertaVaForTokenClassification head and check the logits shape.

    Fixes duplicate parameter names and unbound locals from the mangled
    original; ``num_labels`` is applied to the config before building.
    """
    config.num_labels = self.num_labels
    model = TFDebertaVaForTokenClassification(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    # One logit per class at every token position.
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def _lowercase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
    """Build a TFDebertaVaForQuestionAnswering head and check start/end logits shapes.

    Fixes duplicate parameter names and unbound ``model``/``result`` locals
    from the mangled original.
    """
    model = TFDebertaVaForQuestionAnswering(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    # Extractive QA emits one start logit and one end logit per position.
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def _lowercase ( self ):
    """Repackage prepare_config_and_inputs() output as (config, inputs_dict).

    Fixes the mangled original, where every unpack target was named
    ``SCREAMING_SNAKE_CASE_`` so the names used to build ``inputs_dict``
    (``input_ids`` etc.) were never defined.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
# Common-test suite for the TF DeBERTa-v2 models: wires up the model tester and
# config tester, then delegates each check to the tester methods above.
# NOTE(review): the mixin base classes are mangled to ``lowerCAmelCase__`` —
# presumably TFModelTesterMixin / PipelineTesterMixin; confirm against upstream.
@require_tf
class snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
# All TF DeBERTa-v2 head classes exercised by the common model tests.
UpperCAmelCase : Union[str, Any] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
# pipeline-task name -> model class mapping used by the pipeline tests.
UpperCAmelCase : List[Any] = (
{
"""feature-extraction""": TFDebertaVaModel,
"""fill-mask""": TFDebertaVaForMaskedLM,
"""question-answering""": TFDebertaVaForQuestionAnswering,
"""text-classification""": TFDebertaVaForSequenceClassification,
"""token-classification""": TFDebertaVaForTokenClassification,
"""zero-shot""": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
# Flags consumed by the common test mixins (head masking / ONNX, presumably).
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : Optional[int] = False
def _lowercase ( self : Union[str, Any] ) -> str:
"""Set up the model tester and a ConfigTester with a small hidden size."""
SCREAMING_SNAKE_CASE_ = TFDebertaVaModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
"""Run the shared config sanity checks."""
self.config_tester.run_common_tests()
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
"""Check the base model's output shape."""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def _lowercase ( self : List[str] ) -> Optional[Any]:
"""Check the masked-LM head."""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def _lowercase ( self : Union[str, Any] ) -> List[str]:
"""Check the question-answering head."""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
"""Check the sequence-classification head."""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def _lowercase ( self : Tuple ) -> Dict:
"""Check the token-classification head."""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
@slow
def _lowercase ( self : Tuple ) -> str:
"""Smoke-test loading pretrained weights from the Hub (slow)."""
SCREAMING_SNAKE_CASE_ = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
self.assertIsNotNone(lowerCAmelCase_ )
# Integration test: runs the pretrained TF DeBERTa-v2 model on a fixed input and
# compares a 3x3 slice of the hidden states against precomputed values.
# NOTE(review): identifiers are mangled — the model call should consume the two
# tf.constant tensors above it, and the assert should compare the model output;
# the names ``lowerCAmelCase_``/``output`` are not bound here. Confirm upstream.
@require_tf
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='''Model not available yet''' )
def _lowercase ( self : Dict ) -> Optional[Any]:
"""Placeholder for a test whose checkpoint is not published yet."""
pass
@slow
def _lowercase ( self : Tuple ) -> Dict:
"""Compare model hidden states on a fixed sentence against golden values."""
SCREAMING_SNAKE_CASE_ = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
SCREAMING_SNAKE_CASE_ = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
SCREAMING_SNAKE_CASE_ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
SCREAMING_SNAKE_CASE_ = tf.constant(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 )
| 393 | 1 |
"""simple docstring"""
def a__ ( a : list ):
    """Return all permutations of *a* as a list of tuples (Heap's algorithm).

    Fixes the mangled original, where the inner helper declared two
    parameters with the same name (a SyntaxError) and the locals ``res``,
    ``arr`` and ``k`` were never bound.

    Note: the input list is permuted in place while generating (as in the
    original upstream implementation) and ends in some permuted order.
    """
    if len(a) <= 1:
        return [tuple(a)]
    res = []

    def generate(k: int, arr: list):
        # Heap's algorithm: emit the current arrangement when the prefix
        # length reaches 1, otherwise recurse and swap.
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap i-th with last
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap first with last
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(a), a)
    return res
if __name__ == "__main__":
    # Read a comma-separated list of integers and print all permutations.
    # Fixes the mangled original, where every assignment targeted the dead
    # name ``_a`` while the code read the never-bound ``user_input``/``arr``,
    # and the call used the undefined name ``heaps`` instead of ``a__``.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(a__(arr))
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import structure for the BARTpho tokenizer package.
# Fixes the mangled original: the structure dict and the tokenizer entry were
# both assigned to the dead name ``_a`` while ``_LazyModule`` read the
# never-defined ``_import_structure``, and the lazy module itself was bound to
# a throwaway name instead of replacing this module in ``sys.modules``.
_import_structure = {}

try:
    # BartphoTokenizer requires sentencepiece; expose it only when available.
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Defer all submodule imports until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 1 |
def __lowerCAmelCase ( __snake_case = 200 ):
__lowerCAmelCase = [1, 2, 5, 10, 20, 50, 100, 200]
__lowerCAmelCase = [0] * (pence + 1)
__lowerCAmelCase = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__snake_case , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
| 367 |
import os
# Precomputes a list of the 100 first triangular numbers
# Precomputes a list of the 100 first triangular numbers (Project Euler 42).
# Fixed: the function below reads TRIANGULAR_NUMBERS, but the mangled original
# bound the list to an unrelated name.
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def __lowerCAmelCase ( ):
    """Count the triangular words in the ``words.txt`` file next to this script.

    A word's value is the sum of its letter positions (A=1, ...); a word is
    triangular when that value is a triangular number. Fixes the mangled
    original, where ``__file__`` and every local were replaced by dead names.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    with open(words_file_path) as f:
        words = f.readline()

    # File format: one line of comma-separated, double-quoted words.
    words = [word.strip("\"") for word in words.strip("\r\n").split(",")]
    triangular_words = [
        word_value
        for word_value in [sum(ord(x) - 64 for x in word) for word in words]
        if word_value in TRIANGULAR_NUMBERS
    ]
    return len(triangular_words)


if __name__ == "__main__":
    print(__lowerCAmelCase())
| 367 | 1 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
_lowerCAmelCase = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
_lowerCAmelCase = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
_lowerCAmelCase = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def UpperCamelCase ( preds , labels ):
    """Return the fraction of *preds* equal to *labels* as a plain float.

    Fixes the mangled original, where both parameters were named ``a``
    (a SyntaxError) while the body read ``preds``/``labels``.
    Inputs are expected to be numpy arrays of equal shape.
    """
    return float((preds == labels).mean())
def UpperCamelCase ( preds , labels ):
    """Return both accuracy and (binary) F1 for *preds* against *labels*.

    Fixes the mangled original, where both parameters were named ``a`` (a
    SyntaxError) and the dict read the never-bound ``acc``/``fa``. Accuracy
    is computed inline because the sibling helper's name was mangled away.
    """
    acc = float((preds == labels).mean())
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def UpperCamelCase ( en_sentvecs , in_sentvecs ):
    """Precision@10 for cross-lingual sentence retrieval (CVIT-MKB CLSR).

    For each English sentence vector, ranks all Indic sentence vectors by
    cosine distance and scores a hit when the aligned (same-index) sentence
    is among the 10 nearest. Fixes the mangled original, where both
    parameters shared one name and every local was unbound.
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # Mean-center each side independently before computing cosine distances.
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]  # 10 nearest candidates per query
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
# IndicGLUE metric: dispatches per-configuration to precision@10, accuracy+F1,
# or plain accuracy.
# NOTE(review): the helper names referenced below (precision_at_aa, acc_and_fa,
# simple_accuracy) and the docstring constants (_DESCRIPTION,
# _KWARGS_DESCRIPTION) were mangled away at module level — they are not bound
# in this file as shown; confirm against the upstream `indic_glue` metric.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def snake_case__ ( self : Optional[int] ):
# Validate the configuration name, then describe the expected feature
# types: int64 labels, except cvit-mkb-clsr which takes float32 vectors.
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def snake_case__ ( self : Tuple , a__ : Tuple , a__ : Union[str, Any] ):
# Route to the metric matching the active configuration.
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(a__ , a__ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(a__ , a__ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(a__ , a__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 245 |
'''simple docstring'''
# Pinned dependency table (package name -> pip requirement specifier), as used
# by the diffusers setup/dependency-version machinery.
_lowerCAmelCase = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 245 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A = ['bert-base-uncased', 'bert-base-cased']
A = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
# Minimal Keras model bundling an in-graph tokenizer with a tiny BERT; used to
# verify that a TFBertTokenizer survives a save/load round trip.
# NOTE(review): names are mangled — __init__ reads the unbound names
# ``tokenizer`` and attribute targets, and call() returns the unbound ``out``;
# upstream assigns self.tokenizer / self.bert and returns the model output.
class lowerCAmelCase__ ( tf.keras.Model ):
'''simple docstring'''
def __init__( self : Optional[int] , snake_case__ : Union[str, Any] ) -> List[str]:
super().__init__()
_lowerCamelCase = tokenizer
_lowerCamelCase = AutoConfig.from_pretrained(snake_case__ )
_lowerCamelCase = TFAutoModel.from_config(snake_case__ )
def _snake_case ( self : Union[str, Any] , snake_case__ : Union[str, Any] ) -> Optional[Any]:
# Tokenize inside the graph, feed through BERT, return the pooled output.
_lowerCamelCase = self.tokenizer(snake_case__ )
_lowerCamelCase = self.bert(**snake_case__ )
return out["pooler_output"]
# Test suite comparing the in-graph TFBertTokenizer against the Python
# BertTokenizer, plus tf.function compilation and SavedModel export checks.
# NOTE(review): locals are mangled to the single dead name ``_lowerCamelCase``
# throughout, so the names each test reads (python_outputs, tf_outputs, ...)
# are unbound as shown; confirm against the upstream transformers test file.
@require_tf
@require_tensorflow_text
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ) -> str:
# Build matched lists of Python tokenizers and TF tokenizers (fast and
# non-fast variants) plus a set of tricky test sentences.
super().setUp()
_lowerCamelCase = [
BertTokenizer.from_pretrained(snake_case__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_lowerCamelCase = [TFBertTokenizer.from_pretrained(snake_case__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case__ , use_fast_bert_tokenizer=snake_case__ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_lowerCamelCase = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_lowerCamelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _snake_case ( self : Any ) -> Optional[int]:
# TF tokenizer output must match the Python tokenizer's, tensor by tensor.
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCamelCase = tokenizer(snake_case__ , return_tensors='tf' , padding='longest' )
_lowerCamelCase = tf_tokenizer(snake_case__ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def _snake_case ( self : int ) -> List[str]:
# Passing pre-paired sentences must equal passing text/text_pair separately.
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase = tf_tokenizer(self.paired_sentences )
_lowerCamelCase = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def _snake_case ( self : Dict ) -> Optional[Any]:
# The tokenizer must produce identical output when wrapped in tf.function.
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase = tf.function(snake_case__ )
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCamelCase = tf.constant(snake_case__ )
_lowerCamelCase = compiled_tokenizer(snake_case__ )
_lowerCamelCase = tf_tokenizer(snake_case__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _snake_case ( self : int ) -> int:
# SavedModel round trip: outputs before and after save/load must agree.
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase = ModelToSave(tokenizer=snake_case__ )
_lowerCamelCase = tf.convert_to_tensor(self.test_sentences )
_lowerCamelCase = model(snake_case__ )  # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCamelCase = Path(snake_case__ ) / 'saved.model'
model.save(snake_case__ )
_lowerCamelCase = tf.keras.models.load_model(snake_case__ )
_lowerCamelCase = loaded_model(snake_case__ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 ) | 544 | def lowerCamelCase ( UpperCamelCase : int , UpperCamelCase : int ) -> float:
return base * power(UpperCamelCase , (exponent - 1) ) if exponent else 1
# Interactive driver: reads base and exponent, raises base to the exponent via
# the recursive power() defined above, handling negative exponents by inverting.
# NOTE(review): identifiers are mangled — every assignment targets ``A`` while
# the code reads the never-bound ``base``/``exponent``/``result``, and the
# recursive function's name was mangled away from ``power``; confirm upstream.
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
A = int(input('Enter the base: ').strip())
A = int(input('Enter the exponent: ').strip())
A = power(base, abs(exponent))
if exponent < 0:  # power() does not properly deal w/ negative exponents
A = 1 / result
print(F'''{base} to the power of {exponent} is {result}''')
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
# Pipeline test suite for audio-classification: shape/contract checks with raw
# waveforms, a tiny random model, and a slow Superb keyword-spotting check.
# NOTE(review): the two class attributes below share one mangled name, so only
# the second assignment survives; upstream these are model_mapping and
# tf_model_mapping. Confirm against the upstream transformers test file.
@is_pipeline_test
class _A ( unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
__lowerCamelCase : List[str] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
# Build the pipeline under test plus two dummy raw waveforms.
snake_case : Union[str, Any] = AudioClassificationPipeline(model=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ )
# test with a raw waveform
snake_case : Optional[int] = np.zeros((34000,) )
snake_case : List[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
# Contract check: output is a list of {score, label} dicts, and top_k=1
# truncates it to one entry.
snake_case : List[str] = examples
snake_case : Union[str, Any] = audio_classifier(SCREAMING_SNAKE_CASE_ )
# by default a model is initialized with num_labels=2
self.assertEqual(
SCREAMING_SNAKE_CASE_ ,[
{"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """label""": ANY(SCREAMING_SNAKE_CASE_ )},
{"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """label""": ANY(SCREAMING_SNAKE_CASE_ )},
] ,)
snake_case : Dict = audio_classifier(SCREAMING_SNAKE_CASE_ ,top_k=1 )
self.assertEqual(
SCREAMING_SNAKE_CASE_ ,[
{"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """label""": ANY(SCREAMING_SNAKE_CASE_ )},
] ,)
self.run_torchaudio(SCREAMING_SNAKE_CASE_ )
@require_torchaudio
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
# Same contract check, but fed from a real dataset audio array.
import datasets
# test with a local file
snake_case : Optional[Any] = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
snake_case : Any = dataset[0]["""audio"""]["""array"""]
snake_case : Optional[Any] = audio_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_ ,[
{"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """label""": ANY(SCREAMING_SNAKE_CASE_ )},
{"""score""": ANY(SCREAMING_SNAKE_CASE_ ), """label""": ANY(SCREAMING_SNAKE_CASE_ )},
] ,)
@require_torch
def snake_case_ ( self ):
# Small random checkpoint: compare top-4 scores against two accepted
# golden outputs (accounts for a historical PyTorch numeric change).
snake_case : List[str] = """anton-l/wav2vec2-random-tiny-classifier"""
snake_case : Union[str, Any] = pipeline("""audio-classification""" ,model=SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = np.ones((8000,) )
snake_case : int = audio_classifier(SCREAMING_SNAKE_CASE_ ,top_k=4 )
snake_case : List[Any] = [
{"""score""": 0.08_42, """label""": """no"""},
{"""score""": 0.08_38, """label""": """up"""},
{"""score""": 0.08_37, """label""": """go"""},
{"""score""": 0.08_34, """label""": """right"""},
]
snake_case : Dict = [
{"""score""": 0.08_45, """label""": """stop"""},
{"""score""": 0.08_44, """label""": """on"""},
{"""score""": 0.08_41, """label""": """right"""},
{"""score""": 0.08_34, """label""": """left"""},
]
self.assertIn(nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
snake_case : Optional[int] = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
snake_case : Tuple = audio_classifier(SCREAMING_SNAKE_CASE_ ,top_k=4 )
self.assertIn(nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def snake_case_ ( self ):
# Slow end-to-end check against the Superb keyword-spotting checkpoint.
import datasets
snake_case : Tuple = """superb/wav2vec2-base-superb-ks"""
snake_case : List[str] = pipeline("""audio-classification""" ,model=SCREAMING_SNAKE_CASE_ )
snake_case : int = datasets.load_dataset("""anton-l/superb_dummy""" ,"""ks""" ,split="""test""" )
snake_case : Tuple = np.array(dataset[3]["""speech"""] ,dtype=np.floataa )
snake_case : List[Any] = audio_classifier(SCREAMING_SNAKE_CASE_ ,top_k=4 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=3 ) ,[
{"""score""": 0.9_81, """label""": """go"""},
{"""score""": 0.0_07, """label""": """up"""},
{"""score""": 0.0_06, """label""": """_unknown_"""},
{"""score""": 0.0_01, """label""": """down"""},
] ,)
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def snake_case_ ( self ):
# TF has no audio-classification support; intentionally skipped.
pass
| 702 |
from __future__ import annotations
from collections import Counter
from random import random
class _A :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
snake_case : Optional[Any] = {}
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : int = {}
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if nodea not in self.connections:
self.add_node(SCREAMING_SNAKE_CASE_ )
if nodea not in self.connections:
self.add_node(SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = probability
def snake_case_ ( self ):
'''simple docstring'''
return list(self.connections )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Union[str, Any] = 0
snake_case : Optional[int] = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def lowercase ( __A : str , __A : list[tuple[str, str, float]] , __A : int ) -> dict[str, int]:
'''simple docstring'''
snake_case : List[Any] = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(__A , __A , __A )
snake_case : Dict = Counter(graph.get_nodes() )
snake_case : int = start
for _ in range(__A ):
snake_case : Optional[int] = graph.transition(__A )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 | 0 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive *size*-length tuples from *seq*.

    Restored from the mangled original, where all five functions in this
    module shared the name ``_lowerCAmelCase`` (each shadowing the last) and
    read unbound locals; distinct names make them callable again.
    """
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Normalize plaintext for Playfair: uppercase ASCII letters only,
    an 'X' inserted between doubled letters, and 'X'-padded to even length.

    Note: 'J' is kept as-is here even though the 5x5 table has no 'J';
    encoding a 'J' will therefore raise (matches the original behavior).
    """
    dirty = "".join(c.upper() for c in dirty if c in string.ascii_letters)
    if len(dirty) < 2:
        return dirty
    clean = ""
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"  # break up doubled letters inside a digram
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"  # pad to an even number of characters
    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair table (as a flat 25-item list) from *key*.

    Key letters (deduplicated, 'J' folded out by omission from the alphabet)
    come first, followed by the remaining alphabet.
    """
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt *plaintext* with the Playfair cipher under *key*.

    https://en.wikipedia.org/wiki/Playfair_cipher#Description
    """
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    for chara, charb in chunker(plaintext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:  # same row: take the letter to the right (wrapping)
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:  # same column: take the letter below (wrapping)
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle: same row, the other letter's column
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt *ciphertext* with the Playfair cipher under *key*.

    Inverse of encode(): left/up shifts instead of right/down; the rectangle
    rule is its own inverse.
    """
    table = generate_table(key)
    plaintext = ""
    for chara, charb in chunker(ciphertext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:  # same row: take the letter to the left (wrapping)
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:  # same column: take the letter above (wrapping)
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle: same row, the other letter's column
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
| 331 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Guarded re-exports for the Versatile Diffusion pipelines: if a recent enough
# transformers (>= 4.25.0) and torch are not installed, fall back to the dummy
# placeholder objects so `from ... import X` still resolves (and raises a
# helpful error only on use).
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
# Dependencies satisfied: export the real implementations.
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
A : Optional[int] = list[list[float | int]]
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = len(_UpperCamelCase )
__lowerCAmelCase = [[0 for _ in range(size + 1 )] for _ in range(_UpperCamelCase )]
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
for row in range(_UpperCamelCase ):
for col in range(_UpperCamelCase ):
__lowerCAmelCase = matrix[row][col]
__lowerCAmelCase = vector[row][0]
__lowerCAmelCase = 0
__lowerCAmelCase = 0
while row < size and col < size:
# pivoting
__lowerCAmelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_UpperCamelCase , _UpperCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__lowerCAmelCase , __lowerCAmelCase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _UpperCamelCase ):
__lowerCAmelCase = augmented[rowa][col] / augmented[row][col]
__lowerCAmelCase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _UpperCamelCase ):
for row in range(_UpperCamelCase ):
__lowerCAmelCase = augmented[row][col] / augmented[col][col]
for cola in range(_UpperCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_UpperCamelCase )
]
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = len(_UpperCamelCase )
__lowerCAmelCase = [[0 for _ in range(_UpperCamelCase )] for _ in range(_UpperCamelCase )]
__lowerCAmelCase = [[0] for _ in range(_UpperCamelCase )]
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
for x_val, y_val in enumerate(_UpperCamelCase ):
for col in range(_UpperCamelCase ):
__lowerCAmelCase = (x_val + 1) ** (size - col - 1)
__lowerCAmelCase = y_val
__lowerCAmelCase = solve(_UpperCamelCase , _UpperCamelCase )
def interpolated_func(_UpperCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_UpperCamelCase ) )
return interpolated_func
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def _lowerCamelCase ( func = question_function , order = 10 ):
    """Project Euler 101: sum the first incorrect terms (FITs) of the optimal
    polynomial approximations of degrees 1..order for ``func``.

    NOTE(review): reconstructed — the original declared both parameters with
    one obfuscated name (a SyntaxError) while the body read ``func``/``order``.
    ``question_function`` and ``interpolate`` are bound under obfuscated names
    in this module — confirm the bindings.
    """
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        # Walk forward until the fitted polynomial first disagrees with func.
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
    # NOTE(review): `solution` is not bound under that name in this module —
    # every function above was renamed to `_lowerCamelCase` by obfuscation.
    print(f'''{solution() = }''')
# (removed cross-file concatenation artifact)
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
# Desktop-browser User-Agent so Google serves the full search result markup.
A : Union[str, Any] = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _lowerCamelCase ( query: str = "dhaka" , max_images: int = 5 ) -> int:
    """Download up to ``max_images`` Google Image results for ``query`` into
    ``query_<query>/`` and return the number downloaded.

    NOTE(review): reconstructed — the original declared both parameters with
    one obfuscated name (a SyntaxError) and discarded every intermediate
    value into a throwaway local. The module's User-Agent dict is bound to
    ``A`` above; ``BeautifulSoup`` is imported from ``bsa`` at the top of the
    file, which looks like a typo for ``bs4`` — confirm.
    """
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=A)
    soup = BeautifulSoup(html.text, "html.parser")
    # Google embeds the image metadata inside AF_initDataCallback script tags.
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    # Drop the low-resolution thumbnail URLs, keep the full-resolution ones.
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    index = 0  # robust default: no UnboundLocalError when the list is empty
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # URLs are double-escaped in the embedded JSON; decode twice.
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index
if __name__ == "__main__":
    try:
        # NOTE(review): `download_images_from_google_query` is not bound under
        # that name here (the function above is `_lowerCamelCase`), and the
        # count is bound to `A` but printed as `image_count` — confirm names.
        A : Any = download_images_from_google_query(sys.argv[1])
        print(f'''{image_count} images were downloaded to disk.''')
    except IndexError:
        print("Please provide a search term.")
        raise
# (removed cross-file concatenation artifact)
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def __lowerCamelCase ( arr , low , high ):
    """Return (start, end, sum) of a maximum-sum contiguous subarray of
    arr[low..high], by divide and conquer.

    NOTE(review): reconstructed — the original declared three parameters with
    one name (a SyntaxError) and recursed through unbound global names; it
    now recurses on itself and computes the crossing sum with a private
    helper so the function is self-contained.
    """

    def _cross_sum(arr, low, mid, high):
        # Best subarray forced to cross the midpoint: grow left from mid and
        # right from mid + 1 independently, then join the two halves.
        left_sum, max_left = float("-inf"), -1
        summ = 0
        for i in range(mid, low - 1, -1):
            summ += arr[i]
            if summ > left_sum:
                left_sum = summ
                max_left = i
        right_sum, max_right = float("-inf"), -1
        summ = 0
        for i in range(mid + 1, high + 1):
            summ += arr[i]
            if summ > right_sum:
                right_sum = summ
                max_right = i
        return max_left, max_right, left_sum + right_sum

    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = __lowerCamelCase(arr, low, mid)
    right_low, right_high, right_sum = __lowerCamelCase(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = _cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    if right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def __lowerCamelCase ( arr , low , mid , high ):
    """Return (left_index, right_index, sum) of the best subarray of
    arr[low..high] that crosses ``mid``.

    NOTE(review): reconstructed — the original declared four parameters with
    a single name, which is a SyntaxError.
    """
    # Grow left from mid toward low, keeping the best prefix sum seen.
    left_sum, max_left = float("-inf"), -1
    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    # Grow right from mid + 1 toward high, keeping the best suffix sum seen.
    right_sum, max_right = float("-inf"), -1
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def __lowerCamelCase ( lowerCamelCase__ ):
    """Return the wall-clock seconds one max_subarray run takes on a random
    array of length ``lowerCamelCase__`` (values in [1, n]).

    NOTE(review): the original bound the array and timestamps to a throwaway
    local, then read undefined names (`input_size`, `start`, `end`); fixed so
    the generated array is actually passed. `max_subarray` itself is not
    bound under that name in this module — confirm the intended callee.
    """
    test_input = [randint(1, lowerCamelCase__) for _ in range(lowerCamelCase__)]
    start = time.time()
    max_subarray(test_input, 0, lowerCamelCase__ - 1)
    end = time.time()
    return end - start
def __lowerCamelCase ( ):
    """Benchmark max_subarray over growing input sizes, print a table and show
    a matplotlib plot of runtime versus size.

    NOTE(review): the original comprehension read an undefined loop name and
    the print/plot calls received undefined names; fixed so the measured
    sizes and runtimes flow through. `time_max_subarray` is not bound under
    that name in this module — confirm the intended callee.
    """
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
    # Run module doctests (none are defined above, so this is currently a no-op).
    from doctest import testmod
    testmod()
# (removed cross-file concatenation artifact)
import math
class snake_case__:
    """Dense directed graph supporting Floyd-Warshall all-pairs shortest paths.

    NOTE(review): reconstructed — the original declared `add_edge`'s and
    `show_min`'s parameters with duplicate names (SyntaxErrors), named three
    different methods `snake_case` (so the earlier ones were shadowed), and
    bound the adjacency/DP matrices to throwaway locals instead of instance
    attributes. The method names expected by the demo below (`add_edge`,
    `floyd_warshall`, `show_min`) are restored.
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        for i in range(0, n):
            self.dp[i][i] = 0  # a node is at distance 0 from itself

    def add_edge(self, u, v, w):
        """Add (or overwrite) the directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every pair through every intermediate node k — O(n^3)."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the computed shortest distance from u to v."""
        return self.dp[u][v]
if __name__ == "__main__":
    # Demo: 5-node weighted digraph; run Floyd-Warshall then query two paths.
    # NOTE(review): the original referenced undefined names `Graph`/`graph`;
    # bound to the class actually defined above (`snake_case__`).
    graph = snake_case__(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 1_0)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 1_0)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
# (removed cross-file concatenation artifact)
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( nums: list[int] , max_sum: int ) -> list[list[int]]:
    """Return every subset of ``nums`` (as lists, in DFS order) whose sum is
    exactly ``max_sum``.

    NOTE(review): reconstructed — the original declared both parameters with
    one name (a SyntaxError) and forwarded undefined ``a_`` arguments to a
    recursive helper; the state-space walk is now a private nested helper so
    the function is self-contained.
    """

    def _dfs(start_index: int, path: list[int], remaining_sum: int) -> None:
        # Prune: already past the target, or everything left cannot reach it.
        if sum(path) > max_sum or sum(path) + remaining_sum < max_sum:
            return
        if sum(path) == max_sum:
            result.append(path)
            return
        for index in range(start_index, len(nums)):
            _dfs(index + 1, [*path, nums[index]], remaining_sum - nums[index])

    result: list[list[int]] = []
    _dfs(0, [], sum(nums))
    return result
def _lowerCAmelCase ( nums: list[int] , max_sum: int , num_index: int , path: list[int] , result: list[list[int]] , remaining_nums_sum: int , ) -> None:
    """Depth-first state-space walk: append to ``result`` every extension of
    ``path`` (choosing from nums[num_index:]) whose sum is exactly max_sum.

    NOTE(review): reconstructed — the original declared six parameters with a
    single name (a SyntaxError) and recursed through the undefined name
    ``create_state_space_tree``; it now recurses on itself.
    """
    # Prune: already past the target, or even taking everything left is short.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        _lowerCAmelCase(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index],
        )
# Demo: enumerate the subsets of `nums` that sum to `max_sum`.
# NOTE(review): the original bound every value to `_SCREAMING_SNAKE_CASE` and
# then read `nums`/`max_sum`/`result` (undefined); `generate_sum_of_subsets_soln`
# is not bound under that name in this module (the entry point above was
# renamed by obfuscation) — confirm the intended callee.
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Banner inserted above converted lines that need manual attention.
# NOTE(review): every constant below rebinds `_SCREAMING_SNAKE_CASE`, so only
# the final assignment survives at module level; the command class below
# expects distinct names (HIGHLIGHT_MESSAGE_PRE/POST, TO_HIGHLIGHT,
# TO_CONVERT) — confirm and restore them.
_SCREAMING_SNAKE_CASE = '''<<<<<<< This should probably be modified because it mentions: '''
# Closing banner for a highlighted block.
_SCREAMING_SNAKE_CASE = '''=======
>>>>>>>
'''
# tfds API names whose presence flags a converted file for manual review.
_SCREAMING_SNAKE_CASE = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]
# Regex (pattern, replacement) pairs applied line-by-line to tfds scripts.
_SCREAMING_SNAKE_CASE = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCAmelCase ( lowerCamelCase_ : Namespace ):
    """Factory hooked into argparse ``set_defaults(func=...)``: build a
    ConvertCommand from the parsed CLI arguments.

    NOTE(review): the original body read an undefined name ``args`` instead
    of its parameter; fixed to read from the parameter.
    """
    return ConvertCommand(lowerCamelCase_.tfds_path, lowerCamelCase_.datasets_directory)
class __lowercase ( lowerCAmelCase__ ):
    """CLI command converting TensorFlow Datasets scripts into HuggingFace
    Datasets scripts.

    NOTE(review): reconstructed — the obfuscated original declared
    ``__init__`` with three parameters sharing one name (a SyntaxError), gave
    ``register_subcommand`` and ``run`` the same method name, and discarded
    every intermediate value into a throwaway local. The ABC method names
    (`register_subcommand`, `run`) follow the BaseDatasetsCLICommand contract
    implied by the factory above. Constants HIGHLIGHT_MESSAGE_PRE/POST,
    TO_HIGHLIGHT and TO_CONVERT are bound under obfuscated names in this
    module — confirm the bindings.
    """

    @staticmethod
    def register_subcommand(parser):
        """Register the ``convert`` sub-command on the CLI parser."""
        train_parser = parser.add_parser(
            '''convert''' ,help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' ,)
        train_parser.add_argument(
            '''--tfds_path''' ,type=str ,required=True ,help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' ,)
        train_parser.add_argument(
            '''--datasets_directory''' ,type=str ,required=True ,help='''Path to the HuggingFace Datasets folder.''' )
        # `_lowerCAmelCase` is the factory defined just above this class.
        train_parser.set_defaults(func=_lowerCAmelCase )

    def __init__(self ,tfds_path ,datasets_directory ,*args ):
        """Store the source/destination paths and a scoped logger."""
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self ):
        """Convert every eligible ``.py`` file under the tfds path."""
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}" )
            input_file = os.path.join(abs_tfds_path ,f_name )
            output_file = os.path.join(abs_datasets_path ,f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(input_file ,encoding='''utf-8''' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''' ,'''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e : e in out_line ,TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight ) + '''\n''' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern ,replacement ,out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' ,out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    out_line = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace('''.py''' ,'''''' )
                output_dir = os.path.join(abs_datasets_path ,dataset_name )
                output_file = os.path.join(output_dir ,f_name )
                os.makedirs(output_dir ,exist_ok=True )
                self._logger.info(f"Adding directory {output_dir}" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file ,'''w''' ,encoding='''utf-8''' ) as f:
                f.writelines(out_lines )
            self._logger.info(f"Converted in {output_file}" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' ,'''''' )]
                self._logger.info(f"Moving {dest_folder} to {utils_file}" )
                shutil.copy(utils_file ,dest_folder )
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
# (removed cross-file concatenation artifact)
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
_A = pytest.mark.integration
# NOTE(review): each assignment below rebinds `_A`, so only the last one
# survives at module level; the helpers below expect distinct names
# (REQUIRE_FAIRSEQ, _has_fairseq, REQUIRE_TRANSFORMERS, _has_transformers,
# UNSUPPORTED_ON_WINDOWS, _on_windows) — confirm and restore them.
_A = {"comet"}
_A = importlib.util.find_spec("fairseq") is not None
_A = {"code_eval"}
_A = os.name == "nt"
_A = {"bertscore", "frugalscore", "perplexity"}
_A = importlib.util.find_spec("transformers") is not None
def lowercase (_snake_case ) -> Dict:
    """Decorator: run the wrapped metric test only when fairseq is importable,
    otherwise skip it.

    NOTE(review): the wrapper resolves `_has_fairseq`, `REQUIRE_FAIRSEQ`,
    `metric_name` and `test_case` from the enclosing module at call time;
    those names are not bound under these spellings here — confirm.
    """

    @wraps(_snake_case )
    def skipping_wrapper(self ,_snake_case ):
        # Inverted (De Morgan) form of the original skip condition.
        if _has_fairseq or metric_name not in REQUIRE_FAIRSEQ:
            test_case(self ,_snake_case )
        else:
            self.skipTest("\"test requires Fairseq\"" )

    return skipping_wrapper
def lowercase (_snake_case ) -> List[str]:
    """Decorator: run the wrapped metric test only when transformers is
    importable, otherwise skip it.

    NOTE(review): the wrapper resolves `_has_transformers`,
    `REQUIRE_TRANSFORMERS`, `metric_name` and `test_case` from the enclosing
    module at call time; those names are not bound under these spellings
    here — confirm.
    """

    @wraps(_snake_case )
    def skipping_wrapper(self ,_snake_case ):
        # Inverted (De Morgan) form of the original skip condition.
        if _has_transformers or metric_name not in REQUIRE_TRANSFORMERS:
            test_case(self ,_snake_case )
        else:
            self.skipTest("\"test requires transformers\"" )

    return skipping_wrapper
def lowercase (_snake_case ) -> Any:
    """Decorator: skip the wrapped metric test on Windows when the metric is
    known not to work there.

    NOTE(review): the wrapper resolves `_on_windows`,
    `UNSUPPORTED_ON_WINDOWS`, `metric_name` and `test_case` from the enclosing
    module at call time; those names are not bound under these spellings
    here — confirm.
    """

    @wraps(_snake_case )
    def skipping_wrapper(self ,_snake_case ):
        # Negated form of the original skip condition.
        if not (_on_windows and metric_name in UNSUPPORTED_ON_WINDOWS):
            test_case(self ,_snake_case )
        else:
            self.skipTest("\"test not supported on Windows\"" )

    return skipping_wrapper
def lowercase () -> List[Any]:
    """Enumerate metric names found under ./metrics as parameterized-test
    kwargs: {"testcase_name": name, "metric_name": name}.

    NOTE(review): the original bound the scanned names to a throwaway local
    and then returned a comprehension over an undefined name ``metrics``;
    fixed so the scan result is actually used.
    """
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    snake_case__ , snake_case__ , snake_case__ )
@local
class __UpperCAmelCase ( parameterized.TestCase ):
    """Doctest-driven smoke tests for every metric under ./metrics.

    NOTE(review): obfuscation damage to confirm against upstream — the
    decorator arguments `get_local_metric_names` and `snake_case__` (x3,
    presumably the three skip decorators above) are not bound under these
    names; both class attributes rebind `_snake_case` (patcher registry and
    default metric name collide); every method is named `A`, so later
    definitions shadow earlier ones; most locals are bound to
    `__UpperCamelCase` while later lines read the intended names
    (`metric_module`, `metric`, `parameters`, `results`, ...).
    """

    _snake_case : int = {}
    _snake_case : Union[str, Any] = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
    def A ( self : str , A_ : int )-> List[Any]:
        # Import the metric module from ./metrics and doctest it with the
        # intensive model calls patched out.
        __UpperCamelCase = "[...]"
        __UpperCamelCase = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics" , A_ ) ).module_path )
        __UpperCamelCase = datasets.load.import_main_class(metric_module.__name__ , dataset=A_ )
        # check parameters
        __UpperCamelCase = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
        # run doctest
        with self.patch_intensive_calls(A_ , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    __UpperCamelCase = doctest.testmod(A_ , verbose=A_ , raise_on_error=A_ )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1] # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @slow
    def A ( self : Optional[int] , A_ : int )-> Dict:
        # Same as above, but without patching the intensive calls (slow path).
        __UpperCamelCase = "[...]"
        __UpperCamelCase = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics" , A_ ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            __UpperCamelCase = doctest.testmod(A_ , verbose=A_ , raise_on_error=A_ )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @contextmanager
    # NOTE(review): the next def declares two parameters named `A_` — a
    # SyntaxError introduced by obfuscation.
    def A ( self : Optional[int] , A_ : str , A_ : List[Any] )-> Dict:
        # Enter the registered patcher for this metric, if any.
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](A_ ):
                yield
        else:
            yield
    @contextmanager
    def A ( self : Any )-> Tuple:
        # Redirect datasets.load_metric to the local ./metrics copies.
        # NOTE(review): the inner def also declares duplicate `A_` parameters
        # (a SyntaxError), and the patched attribute is bound to a throwaway
        # local instead of `mock_load_metric.side_effect` — confirm upstream.
        def load_local_metric(A_ : Tuple , *A_ : Any , **A_ : Optional[int] ):
            return load_metric(os.path.join("metrics" , A_ ) , *A_ , **A_ )
        with patch("datasets.load_metric" ) as mock_load_metric:
            __UpperCamelCase = load_local_metric
            yield
    @classmethod
    def A ( cls : Tuple , A_ : Dict )-> Dict:
        # Class decorator factory: register a context-manager patcher for a
        # metric name in the INTENSIVE_CALLS_PATCHER registry.
        def wrapper(A_ : Union[str, Any] ):
            __UpperCamelCase = contextmanager(A_ )
            __UpperCamelCase = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def lowercase (_snake_case ) -> str:
    """Context patcher: stub out bleurt's model download/forward for doctests.

    NOTE(review): obfuscation damage — `LocalMetricTest`, `snake_case__`,
    `MockedPredictor` and `input_dict` are unbound; `tensorflow.compat.va`
    is presumably `compat.v2`; the stub is assigned to a throwaway local
    instead of `mock_create_predictor.return_value` — confirm upstream.
    """
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv" ,"" ,"" ) # handle pytest cli flags
    class __UpperCAmelCase ( snake_case__ ):
        """Fake predictor returning fixed scores for a 2-item batch."""

        def A ( self : Tuple , A_ : str )-> Optional[int]:
            assert len(input_dict["input_ids"] ) == 2
            return np.array([1.03, 1.04] )

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
        __UpperCamelCase = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def lowercase (_snake_case ) -> Tuple:
    """Context patcher: stub out bert_score's model load and forward pass.

    NOTE(review): obfuscation damage — `LocalMetricTest` is unbound; the
    inner stub declares four parameters with one name (a SyntaxError); its
    result is bound to a throwaway local instead of
    `mock_bert_cos_score_idf.side_effect` — confirm upstream.
    """
    import torch
    def bert_cos_score_idf(_snake_case ,_snake_case ,*_snake_case ,**_snake_case ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(_snake_case ) )
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model" ), patch(
        "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
        __UpperCamelCase = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def lowercase (_snake_case ) -> Dict:
    """Context patcher: stub out comet's model download and scoring.

    NOTE(review): obfuscation damage — `LocalMetricTest`, `Model` and
    `scores` are unbound; the fake model's predict method declares duplicate
    parameter names (a SyntaxError); mock return values are bound to throwaway
    locals instead of `mock_download_model.return_value` /
    `mock_load_from_checkpoint.side_effect` — confirm upstream.
    """
    def load_from_checkpoint(_snake_case ):
        class __UpperCAmelCase :
            """Fake comet model scoring each pair with fixed values."""

            def A ( self : Any , A_ : Optional[int] , *A_ : Tuple , **A_ : List[Any] )-> str:
                assert len(A_ ) == 2
                __UpperCamelCase = [0.19, 0.92]
                return scores, sum(A_ ) / len(A_ )
        return Model()
    # mock load_from_checkpoint which is supposed to do download a bert model
    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model" ) as mock_download_model:
        __UpperCamelCase = None
        with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
            __UpperCamelCase = load_from_checkpoint
            yield
def lowercase () -> Union[str, Any]:
    """seqeval must reject an unknown tagging scheme with a clear error.

    NOTE(review): reconstructed — the original bound every value to one
    throwaway local, read undefined names (`wrong_scheme`, `_snake_case`),
    and carried a cross-file concatenation artifact after its final line.
    """
    metric = load_metric(os.path.join("metrics" ,"seqeval" ) )
    wrong_scheme = "ERROR"
    error_message = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError ,match=re.escape(error_message ) ):
        metric.compute(predictions=[] ,references=[] ,scheme=wrong_scheme )
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch/cuDNN fully deterministic so pixel-level comparisons are stable.
enable_full_determinism()
class __UpperCAmelCase ( snake_case__ , unittest.TestCase ):
    """Fast (CPU, tiny-weights) checks for KandinskyInpaintPipeline.

    NOTE(review): obfuscation damage to confirm against upstream — all five
    class attributes rebind `_snake_case` (pipeline_class / params /
    batch_params / required_optional_params / xformers flag collide); every
    property and test method is named `A`, so later definitions shadow
    earlier ones and unittest never discovers the tests; most locals are
    bound to `__UpperCamelCase` while later lines read the intended names
    (`tokenizer`, `text_encoder`, `model`, `components`, `inputs`, ...);
    `A_` is read where no such name is bound; the dummy-inputs method
    declares two parameters with one name (a SyntaxError); the base class
    `snake_case__` is unbound.
    """

    _snake_case : Dict = KandinskyInpaintPipeline
    _snake_case : int = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    _snake_case : str = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    _snake_case : Optional[int] = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    _snake_case : Optional[Any] = False
    # Tiny model dimensions used by the dummy components below.
    @property
    def A ( self : int )-> Tuple:
        return 32
    @property
    def A ( self : int )-> List[Any]:
        return 32
    @property
    def A ( self : Dict )-> Tuple:
        return self.time_input_dim
    @property
    def A ( self : Union[str, Any] )-> Tuple:
        return self.time_input_dim * 4
    @property
    def A ( self : Dict )-> str:
        return 1_00
    @property
    def A ( self : int )-> Dict:
        __UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer
    @property
    def A ( self : Tuple )-> Optional[Any]:
        torch.manual_seed(0 )
        __UpperCamelCase = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
        __UpperCamelCase = MultilingualCLIP(A_ )
        __UpperCamelCase = text_encoder.eval()
        return text_encoder
    @property
    def A ( self : int )-> str:
        torch.manual_seed(0 )
        __UpperCamelCase = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        __UpperCamelCase = UNetaDConditionModel(**A_ )
        return model
    @property
    def A ( self : Optional[int] )-> Union[str, Any]:
        # Constructor kwargs for the tiny VQ (movq) image codec.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def A ( self : List[str] )-> Tuple:
        torch.manual_seed(0 )
        __UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
        return model
    def A ( self : str )-> List[Any]:
        # Assemble the full set of tiny pipeline components.
        __UpperCamelCase = self.dummy_text_encoder
        __UpperCamelCase = self.dummy_tokenizer
        __UpperCamelCase = self.dummy_unet
        __UpperCamelCase = self.dummy_movq
        __UpperCamelCase = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
        __UpperCamelCase = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    # NOTE(review): the next def declares two parameters named `A_` —
    # a SyntaxError introduced by obfuscation (presumably device, seed=0).
    def A ( self : Union[str, Any] , A_ : Optional[Any] , A_ : Optional[Any]=0 )-> Dict:
        __UpperCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ )
        __UpperCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ )
        # create init_image
        __UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
        __UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((2_56, 2_56) )
        # create mask
        __UpperCamelCase = np.ones((64, 64) , dtype=np.floataa )
        __UpperCamelCase = 0
        if str(A_ ).startswith("mps" ):
            __UpperCamelCase = torch.manual_seed(A_ )
        else:
            __UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
        __UpperCamelCase = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def A ( self : Optional[int] )-> Dict:
        # End-to-end CPU run: output shape and a fixed pixel slice must match.
        __UpperCamelCase = "cpu"
        __UpperCamelCase = self.get_dummy_components()
        __UpperCamelCase = self.pipeline_class(**A_ )
        __UpperCamelCase = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        __UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
        __UpperCamelCase = output.images
        __UpperCamelCase = pipe(
            **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
        __UpperCamelCase = image[0, -3:, -3:, -1]
        __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        __UpperCamelCase = np.array(
            [0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def A ( self : Union[str, Any] )-> int:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
    """Slow GPU integration test: Kandinsky inpainting against a reference image.

    NOTE(review): reconstructed — the original named both methods `A` (so
    unittest would never run them), discarded locals into `__UpperCamelCase`,
    read the undefined `A_`, used the non-existent dtypes `np.floataa` /
    `torch.floataa`, and carried a concatenation artifact on its last line.
    `torch_device` comes from diffusers.utils (imported above).
    """

    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self) -> None:
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        mask = np.ones((768, 768) , dtype=np.float32 )
        # Zero out (inpaint) the region at the top of the image.
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
# NOTE(review): every constant below rebinds the same name `__a`, so only the
# final assignment (the common-words list) survives at module level; the
# functions below expect distinct names (VALID_CHARS, LOWERCASE_INTS,
# VALID_INTS, COMMON_WORDS) — confirm and restore them. The set comprehension
# also reads VALID_CHARS, which is never bound under that name.
__a = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__a = [ord(letter) for letter in string.ascii_lowercase]
__a = {ord(char) for char in VALID_CHARS}
__a = ["the", "be", "to", "of", "and", "in", "that", "have"]
def __UpperCAmelCase ( ciphertext: list[int] , key: tuple[int, ...] ):
    """XOR-decode ``ciphertext`` with the repeating ``key``; return the decoded
    string, or None as soon as a decoded byte falls outside printable ASCII
    (letters, digits, punctuation, whitespace).

    NOTE(review): reconstructed — the original declared both parameters with
    one name (a SyntaxError) and tested membership in the undefined
    ``VALID_INTS`` (the module's constants all rebind ``__a``); the valid set
    is rebuilt locally so the function is self-contained.
    """
    import string  # local: module-level VALID_INTS was lost to name shadowing

    valid_ints = {
        ord(char)
        for char in string.ascii_letters + string.digits + string.punctuation + string.whitespace
    }
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in valid_ints:
            return None
        decoded += chr(decodedchar)
    return decoded
def __UpperCAmelCase ( a_: list[int] ):
    """Return every candidate plaintext obtained by trying all 3-letter
    lowercase keys against ciphertext ``a_``.

    NOTE(review): reconstructed — the original iterated ``product`` over the
    ciphertext itself and passed the ciphertext as its own key; the lowercase
    key alphabet is rebuilt locally (the module constant was lost to ``__a``
    shadowing). ``try_key`` is not bound under that name in this module —
    confirm the intended callee.
    """
    import string  # local: LOWERCASE key alphabet was lost to name shadowing

    lowercase_ints = [ord(letter) for letter in string.ascii_lowercase]
    possibles: list[str] = []
    for key in product(lowercase_ints, repeat=3):
        encoded = try_key(a_, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def __UpperCAmelCase ( possibles: list[str] , common_word: str ):
    """Keep only the candidate plaintexts containing ``common_word``
    (matched case-insensitively; ``common_word`` is expected lowercase).

    NOTE(review): the original declared both parameters with one name
    (a SyntaxError); names reconstructed from the call site in solution().
    """
    return [possible for possible in possibles if common_word in possible.lower()]
def __UpperCAmelCase ( a_: str = "p059_cipher.txt" ):
    """Project Euler 59: decrypt the XOR-encrypted data file located next to
    this module and return the sum of the ASCII codes of the plaintext.

    NOTE(review): reconstructed — the original joined the filename onto its
    own parent (instead of this module's directory) and passed the filename,
    not the parsed ciphertext, to the filter. ``filter_valid_chars``,
    ``filter_common_word`` and ``COMMON_WORDS`` are not bound under those
    names in this module — confirm the bindings.
    """
    data = Path(__file__).parent.joinpath(a_).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            # A single candidate remains — that's the plaintext.
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
    # NOTE(review): `solution` is not bound under that name in this module
    # (the entry point above is `__UpperCAmelCase`) — confirm the callee.
    # Trailing cross-file concatenation artifact removed from this line.
    print(f'{solution() = }')
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Lazy-import structure: map submodule name -> exported symbols.
# NOTE(review): reconstructed — the obfuscated original rebound the dict to a
# list (clobbering it), read the undefined `_import_structure`, bound the
# lazy module to a throwaway name instead of installing it in sys.modules,
# and carried a concatenation artifact on its final line.
_import_structure = {
    'configuration_swiftformer': [
        'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SwiftFormerConfig',
        'SwiftFormerOnnxConfig',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes.
    _import_structure['modeling_swiftformer'] = [
        'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SwiftFormerForImageClassification',
        'SwiftFormerModel',
        'SwiftFormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Build a ``SwinConfig`` matching a timm checkpoint name.

    The timm name encodes the variant, e.g. ``swin_tiny_patch4_window7_224``:
    part 1 is the model size, part 3 ends with the window size, part 4 is the
    image size.
    """
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 9_6
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 1_2, 2_4)
    elif model_size == "small":
        embed_dim = 9_6
        depths = (2, 2, 1_8, 2)
        num_heads = (3, 6, 1_2, 2_4)
    elif model_size == "base":
        embed_dim = 1_2_8
        depths = (2, 2, 1_8, 2)
        num_heads = (4, 8, 1_6, 3_2)
    else:  # "large"
        embed_dim = 1_9_2
        depths = (2, 2, 1_8, 2)
        num_heads = (6, 1_2, 2_4, 4_8)

    # ImageNet-22k checkpoints have 21841 classes, the rest ImageNet-1k.
    if "in22k" in swin_name:
        num_classes = 2_1_8_4_1
    else:
        num_classes = 1_0_0_0

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    """Map a timm Swin state-dict key to the HF SwinForImageClassification name.

    Substring replacements are applied in sequence; anything that is not the
    classification head is finally prefixed with ``swin.``.
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    # "attn.proj" must be handled before the generic "attn" replacement.
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    """Rewrite a timm Swin state dict into HF format, in place.

    Relative-position masks are dropped, fused ``qkv`` tensors are split into
    separate query/key/value entries (sized from the HF model's attention
    head size), and every other key is renamed via ``rename_key``.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            # Relative-position masks are recomputed by the HF model.
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Port a timm Swin checkpoint to HF format, verify the logits agree,
    then save the model and image processor to ``pytorch_dump_folder_path``.
    """
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    # Sanity-check both models on the standard COCO cats image.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Compute ROUGE between two line-aligned text files.

    Args:
        pred_path: file with one prediction per line.
        tgt_path: file with one reference per line; truncated to ``len(preds)``.
        save_path: optional path where the metric dict is saved as JSON.
        **kwargs: forwarded to ``calculate_rouge``.

    Returns:
        The metrics dict from ``calculate_rouge``.
    """
    # Context managers close the files (the original leaked both handles).
    with open(pred_path) as f:
        pred_lns = [x.strip() for x in f]
    with open(tgt_path) as f:
        tgt_lns = [x.strip() for x in f][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; the config classes below emit warnings through it.
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL for the released BLIP variants.
BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}
class BlipTextConfig(PretrainedConfig):
    """Configuration for the BLIP text encoder/decoder tower.

    Defaults reproduce the Salesforce/blip-vqa-base text model.
    """

    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # Cross-attention width; overwritten by BlipConfig to match the vision tower.
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the text sub-config, accepting either a BlipTextConfig or a full BlipConfig checkpoint."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    """Configuration for the BLIP vision tower (a ViT-style encoder)."""

    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision sub-config, accepting either a BlipVisionConfig or a full BlipConfig checkpoint."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    """Composite BLIP configuration holding a text and a vision sub-config."""

    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        # The text tower cross-attends over vision states of this width.
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        """Build a BlipConfig from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
# Spoof a desktop-browser User-Agent: Google serves different (unparseable)
# markup to unknown clients.
# NOTE(review): garbled identifier — upstream names this dict `headers`;
# confirm the search request below actually receives it.
__SCREAMING_SNAKE_CASE : Dict = {
    '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
    ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Scrape Google Images for ``query`` and download up to ``max_images``
    full-resolution results into ``query_<query>/``.

    Returns the number of image URLs processed (0 if the result markup could
    not be parsed).
    """
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=__SCREAMING_SNAKE_CASE)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    # Round-trip through JSON to unescape the embedded payload string.
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    # Strip the low-resolution gstatic thumbnails so only originals remain.
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )

    index = 0  # avoid a NameError when no full-resolution URL matched
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # URLs are double-escaped in the payload; decode twice.
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
    try:
        # sys.argv[1] is the search term; images land in ./query_<term>/.
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class UpperCAmelCase :
    """Test harness that builds tiny DPR configs and random inputs for the
    TFDPR* model tests below.

    NOTE(review): this class shows mechanical renaming damage — every result
    is bound to a throwaway local `_snake_case` instead of `self.<attr>` or a
    distinct local, and `__init__` repeats the parameter name
    `__lowerCamelCase` (a SyntaxError). Compare against the upstream
    `TFDPRModelTester` before trusting any behavior here.
    """

    def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=1_3 , __lowerCamelCase : str=7 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str=True , __lowerCamelCase : int=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=9_9 , __lowerCamelCase : str=3_2 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : Optional[Any]=3_7 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Any=5_1_2 , __lowerCamelCase : Any=1_6 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Optional[int]=0.0_2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=0 , ):
        """Record the tester hyper-parameters (batch size, hidden size, ...)."""
        _snake_case = parent
        _snake_case = batch_size
        _snake_case = seq_length
        _snake_case = is_training
        _snake_case = use_input_mask
        _snake_case = use_token_type_ids
        _snake_case = use_labels
        _snake_case = vocab_size
        _snake_case = hidden_size
        _snake_case = num_hidden_layers
        _snake_case = num_attention_heads
        _snake_case = intermediate_size
        _snake_case = hidden_act
        _snake_case = hidden_dropout_prob
        _snake_case = attention_probs_dropout_prob
        _snake_case = max_position_embeddings
        _snake_case = type_vocab_size
        _snake_case = type_sequence_label_size
        _snake_case = initializer_range
        _snake_case = num_labels
        _snake_case = num_choices
        _snake_case = scope
        _snake_case = projection_dim
    def __UpperCAmelCase ( self : List[Any] ):
        """Build random ids/masks/labels and a DPRConfig wrapping a small BertConfig."""
        _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _snake_case = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            _snake_case = random_attention_mask([self.batch_size, self.seq_length] )
        _snake_case = None
        if self.use_token_type_ids:
            _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _snake_case = None
        _snake_case = None
        _snake_case = None
        if self.use_labels:
            _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _snake_case = ids_tensor([self.batch_size] , self.num_choices )
        _snake_case = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
        _snake_case = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Tuple ):
        """Run TFDPRContextEncoder with and without masks and check the pooled-output shape."""
        _snake_case = TFDPRContextEncoder(config=__lowerCamelCase )
        _snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
        _snake_case = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
        _snake_case = model(__lowerCamelCase )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : List[str] ):
        """Same pooled-output shape check for TFDPRQuestionEncoder."""
        _snake_case = TFDPRQuestionEncoder(config=__lowerCamelCase )
        _snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
        _snake_case = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
        _snake_case = model(__lowerCamelCase )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] ):
        """TFDPRReader: start/end logits are (batch, seq_len), relevance logits (batch,)."""
        _snake_case = TFDPRReader(config=__lowerCamelCase )
        _snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def __UpperCAmelCase ( self : Optional[Any] ):
        """Repackage prepare_config_and_inputs() into (config, inputs_dict) for the common tests."""
        _snake_case = self.prepare_config_and_inputs()
        (
            (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) ,
        ) = config_and_inputs
        _snake_case = {'''input_ids''': input_ids}
        return config, inputs_dict
@require_tf
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,unittest.TestCase ):
    """Common model-test suite for the TF DPR encoders/reader.

    NOTE(review): the two mixin base classes were garbled to
    `__SCREAMING_SNAKE_CASE` — upstream uses `TFModelTesterMixin` and
    `PipelineTesterMixin` (both imported above); confirm. The `A__`
    class-attribute names are likewise mangled (each assignment shadows the
    previous), and the method bodies bind to a throwaway `_snake_case`.
    """
    A__ : Any = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    A__ : Optional[int] = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
    A__ : List[Any] = False
    A__ : Any = False
    A__ : Optional[int] = False
    A__ : Optional[Any] = False
    A__ : str = False
    def __UpperCAmelCase ( self : Dict ):
        """Create the model tester and a ConfigTester with a tiny hidden size."""
        _snake_case = TFDPRModelTester(self )
        _snake_case = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
    def __UpperCAmelCase ( self : Tuple ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def __UpperCAmelCase ( self : Union[str, Any] ):
        """Exercise the context-encoder shape check."""
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*__lowerCamelCase )
    def __UpperCAmelCase ( self : Union[str, Any] ):
        """Exercise the question-encoder shape check."""
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*__lowerCamelCase )
    def __UpperCAmelCase ( self : Any ):
        """Exercise the reader shape check."""
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*__lowerCamelCase )
    @slow
    def __UpperCAmelCase ( self : Optional[Any] ):
        """Smoke-test from_pretrained for the first checkpoint of each DPR archive list."""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case = TFDPRContextEncoder.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case = TFDPRContextEncoder.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case = TFDPRQuestionEncoder.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case = TFDPRReader.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test against the pretrained single-nq question encoder."""
    @slow
    def __UpperCAmelCase ( self : str ):
        """Embed "[CLS] hello, is my dog cute? [SEP]" and compare the first 10
        embedding dims to a stored reference slice.

        NOTE(review): the results are bound to a throwaway `_snake_case`, so
        `model`/`output`/`expected_slice` read below are undefined as written
        — garbled renaming; compare with the upstream test.
        """
        _snake_case = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
        _snake_case = tf.constant(
            [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] )  # [CLS] hello, is my dog cute? [SEP]
        _snake_case = model(__lowerCamelCase )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        _snake_case = tf.constant(
            [
                [
                    0.0_3_2_3_6_2_5_3,
                    0.1_2_7_5_3_3_3_5,
                    0.1_6_8_1_8_5_0_9,
                    0.0_0_2_7_9_7_8_6,
                    0.3_8_9_6_9_3_3,
                    0.2_4_2_6_4_9_4_5,
                    0.2_1_7_8_9_7_1,
                    -0.0_2_3_3_5_2_2_7,
                    -0.0_8_4_8_1_9_5_9,
                    -0.1_4_3_2_4_1_1_7,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 103 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; the config classes below log through it.
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration for ESM models, optionally carrying the extra ESMFold
    settings (``esmfold_config`` / ``vocab_list``) when ``is_folding_model``.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested EsmFoldConfig."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """Top-level ESMFold settings: how the ESM stem is used plus the folding
    trunk configuration.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 1_28
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        """Coerce ``trunk`` into a TrunkConfig (default, or built from a dict)."""
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Return a plain-dict view, recursing into the trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Hyper-parameters of the ESMFold folding trunk."""

    num_blocks: int = 48
    sequence_state_dim: int = 10_24
    pairwise_state_dim: int = 1_28
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 1_28
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        """Fill in nested defaults and validate the attention geometry."""
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # NOTE(review): the next two checks compare a value against itself and
        # can never fire; upstream transformers ships the same expression
        # (head widths were presumably intended). Kept as-is to preserve behavior.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
                f" {self.sequence_state_dim} and {self.sequence_state_dim}.")
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}.")

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Return a plain-dict view, recursing into the structure module."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Hyper-parameters of the ESMFold structure module (IPA stack): hidden
    dims for the single/pair representations, IPA attention geometry, and
    angle-resnet sizes.
    """

    sequence_dim: int = 3_84
    pairwise_dim: int = 1_28
    ipa_dim: int = 16
    resnet_dim: int = 1_28
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1E-8
    inf: float = 1E5

    def to_dict(self):
        """Return a plain-dict view of the config."""
        return asdict(self)
def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary as a tuple.

    Order matters: token index is the position in this tuple (special tokens,
    the 25 amino-acid letters, '.', '-', '<null_1>', '<mask>').
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Tokenizer checkpoints exercised by the tests below; the tiny model
# checkpoint builds a throwaway TF model around the in-graph tokenizer.
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
class ModelToSave(tf.Module):
    """Minimal tf.Module bundling the in-graph tokenizer with a GPT-2 LM head
    model so the pair can be exported via ``tf.saved_model.save``.
    """

    def __init__(self, tokenizer):
        super().__init__()
        self.tokenizer = tokenizer
        config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
        self.model = TFGPTaLMHeadModel.from_config(config)

    @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
    def serving(self, text):
        """Tokenize raw strings in-graph and return the LM logits."""
        tokenized = self.tokenizer(text)
        input_ids_dense = tokenized["input_ids"].to_tensor()
        # Attention mask: 1 for real tokens, 0 for the ragged-tensor padding.
        input_mask = tf.cast(input_ids_dense > 0, tf.int64)
        # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
        outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
        return outputs
@require_tf
@require_keras_nlp
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Checks that the in-graph TF GPT-2 tokenizer matches the slow Python tokenizer.

    NOTE(review): `unittest`, `TemporaryDirectory`, `Path`, `ModelToSave`,
    `TOKENIZER_CHECKPOINTS` and `_UpperCamelCase` are all read but never defined in
    this file — the obfuscation dropped or renamed them; confirm before running.
    """

    def snake_case__ ( self : Optional[Any] ):
        # setUp: build one slow (Python) and one in-graph (TF) tokenizer per checkpoint.
        super().setUp()
        __magic_name__ = [GPTaTokenizer.from_pretrained(_UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        __magic_name__ = [TFGPTaTokenizer.from_pretrained(_UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        # Sentences covering ASCII, control characters, CJK, accents and rare code points.
        __magic_name__ = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        __magic_name__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )

    def snake_case__ ( self : Tuple ):
        # The TF tokenizer must emit exactly the same ids/masks as the Python one.
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                __magic_name__ = tokenizer([test_inputs] , return_tensors='''tf''' )
                __magic_name__ = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    __magic_name__ = python_outputs[key].numpy()
                    __magic_name__ = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(_UpperCamelCase , tf.intaa ) == tf_outputs_values ) )

    @slow
    def snake_case__ ( self : str ):
        # Tokenization must survive tf.function graph compilation unchanged.
        for tf_tokenizer in self.tf_tokenizers:
            __magic_name__ = tf.function(_UpperCamelCase )
            for test_inputs in self.test_sentences:
                __magic_name__ = tf.constant(_UpperCamelCase )
                __magic_name__ = compiled_tokenizer(_UpperCamelCase )
                __magic_name__ = tf_tokenizer(_UpperCamelCase )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def snake_case__ ( self : Dict ):
        # Round-trip through tf.saved_model: the loaded signature must match direct output.
        for tf_tokenizer in self.tf_tokenizers:
            __magic_name__ = ModelToSave(tokenizer=_UpperCamelCase )
            __magic_name__ = tf.convert_to_tensor([self.test_sentences[0]] )
            __magic_name__ = model.serving(_UpperCamelCase )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                __magic_name__ = Path(_UpperCamelCase ) / """saved.model"""
                tf.saved_model.save(_UpperCamelCase , _UpperCamelCase , signatures={'''serving_default''': model.serving} )
                __magic_name__ = tf.saved_model.load(_UpperCamelCase )
                __magic_name__ = loaded_model.signatures["""serving_default"""](_UpperCamelCase )["""output_0"""]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def snake_case__ ( self : Any ):
        # get_config / from_config must reconstruct an equivalent tokenizer.
        for tf_tokenizer in self.tf_tokenizers:
            __magic_name__ = tf.convert_to_tensor([self.test_sentences[0]] )
            __magic_name__ = tf_tokenizer(_UpperCamelCase )  # Build model with some sample inputs
            __magic_name__ = tf_tokenizer.get_config()
            __magic_name__ = TFGPTaTokenizer.from_config(_UpperCamelCase )
            __magic_name__ = model_from_config(_UpperCamelCase )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def snake_case__ ( self : Dict ):
        # `max_length` must cap the sequence dimension exactly.
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            __magic_name__ = 12_3123
            for max_length in [3, 5, 1024]:
                __magic_name__ = tf.convert_to_tensor([self.test_sentences[0]] )
                __magic_name__ = tf_tokenizer(_UpperCamelCase , max_length=_UpperCamelCase )
                __magic_name__ = out["""input_ids"""].numpy().shape[1]
                assert out_length == max_length
| 715 |
'''simple docstring'''
from typing import List
import numpy as np
def UpperCamelCase ( a ) -> int:
    """Return the number of shards implied by a ``gen_kwargs`` dict.

    Every value of ``a`` that is a ``list`` is treated as a shardable data source;
    all such lists must share one length, which is the shard count. With no list
    at all the dataset is a single shard.

    Args:
        a: the generator kwargs dict.

    Returns:
        The shard count (at least 1).

    Raises:
        RuntimeError: if two list values have different lengths, making the
            sharding ambiguous.

    Note: the original body read ``len(a)`` / ``isinstance(a, a)`` and an undefined
    ``gen_kwargs`` name; restored to operate on the parameter itself.
    """
    # Only list values count as data sources; tuples are deliberately ignored.
    lists_lengths = {key: len(value) for key, value in a.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    # At least one shard even when there is no list in the kwargs.
    return max(1, max_length)
def UpperCamelCase ( num_shards , max_num_jobs ) -> List[range]:
    """Split ``num_shards`` shard indices into at most ``max_num_jobs`` contiguous groups.

    Args:
        num_shards: total number of shards to distribute.
        max_num_jobs: maximum number of groups (jobs) to produce.

    Returns:
        A list of ``range`` objects (one per non-empty group) covering
        ``0..num_shards-1`` contiguously; group sizes differ by at most one.

    Note: the original signature declared the parameter ``a`` twice (a
    SyntaxError) while the body read ``num_shards``/``max_num_jobs`` — and the
    sibling splitter calls this with those keyword names — so the parameters
    are restored accordingly.
    """
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        # Spread the remainder over the first groups so sizes differ by at most one.
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def UpperCamelCase ( gen_kwargs , max_num_jobs ) -> List[dict]:
    """Split ``gen_kwargs`` into at most ``max_num_jobs`` kwargs dicts.

    List values (data sources) are sliced per shard group; non-list values are
    copied unchanged into every group.

    Note: the original signature declared ``a`` twice (a SyntaxError); restored
    from the keyword names the body already used.
    NOTE(review): this relies on the sibling sharding helpers — in this file
    they were obfuscated to other names, so ``_number_of_shards_in_gen_kwargs``
    and ``_distribute_shards`` may not resolve at runtime; confirm.
    """
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        # Nothing to split: hand back a shallow copy of the kwargs.
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def UpperCamelCase ( a ) -> dict:
    """Merge a list of ``gen_kwargs`` dicts back into one.

    For keys whose value is a list in the first dict, the lists from all dicts
    are concatenated in order; other keys keep the first dict's value.

    Args:
        a: non-empty list of gen_kwargs dicts sharing the same keys.

    Note: the original body read an undefined ``gen_kwargs_list`` name and
    called ``isinstance(..., a)``; restored to use the parameter and ``list``.
    """
    return {
        key: [value for gen_kwargs in a for value in gen_kwargs[key]]
        if isinstance(a[0][key], list)
        else a[0][key]
        for key in a[0]
    }
def UpperCamelCase ( rng , gen_kwargs ) -> dict:
    """Return a copy of ``gen_kwargs`` whose list values are shuffled with ``rng``.

    Lists of equal length are shuffled with the *same* permutation, so parallel
    data sources stay aligned with each other.

    Args:
        rng: a ``random.Random``-like object providing ``shuffle``.
        gen_kwargs: the generator kwargs dict.

    Note: the original signature declared ``a`` twice (a SyntaxError) while the
    body read ``rng``/``gen_kwargs``; the parameters are restored accordingly.
    """
    # One shared permutation per distinct list length.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
| 245 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class lowercase_ ( _UpperCamelCase ):
    """Task template describing a language-modeling dataset with a single text column."""

    # NOTE(review): `_UpperCamelCase` (decorator argument and base class) is undefined in
    # this file, and every field below is bound to the same name `__lowerCAmelCase` —
    # the obfuscation destroyed the field names (task / input_schema / label_schema /
    # text_column upstream); confirm before use.
    __lowerCAmelCase = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} )
    __lowerCAmelCase = Features({"text": Value("string" )} )
    __lowerCAmelCase = Features({} )
    __lowerCAmelCase = "text"

    @property
    def __UpperCAmelCase ( self : Tuple ) -> Dict[str, str]:
        # Maps the dataset's text column onto the canonical "text" feature name.
        return {self.text_column: "text"}
| 107 | '''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 107 | 1 |
"""simple docstring"""
import cmath
import math
def _lowercase ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> complex:
'''simple docstring'''
__A : Dict = math.radians(_SCREAMING_SNAKE_CASE )
__A : Dict = math.radians(_SCREAMING_SNAKE_CASE )
# Convert voltage and current to rectangular form
__A : Union[str, Any] = cmath.rect(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__A : int = cmath.rect(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 716 | """simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for the conversion script.
lowerCamelCase : Dict =logging.get_logger(__name__)
# Base URL all Jukebox checkpoint files are downloaded from.
lowerCamelCase : Any ='''https://openaipublic.azureedge.net/jukebox/models/'''
# Checkpoint files (vqvae + three prior levels) required for each released model.
# NOTE(review): the three constants above/below all bind `lowerCamelCase` (each
# assignment shadows the previous); upstream they are logger / PREFIX / MODEL_MAPPING.
lowerCamelCase : Optional[int] ={
    '''jukebox-1b-lyrics''': [
        '''5b/vqvae.pth.tar''',
        '''5b/prior_level_0.pth.tar''',
        '''5b/prior_level_1.pth.tar''',
        '''1b_lyrics/prior_level_2.pth.tar''',
    ],
    '''jukebox-5b-lyrics''': [
        '''5b/vqvae.pth.tar''',
        '''5b/prior_level_0.pth.tar''',
        '''5b/prior_level_1.pth.tar''',
        '''5b_lyrics/prior_level_2.pth.tar''',
    ],
}
def _lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
__A : List[Any] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
__A : Tuple = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
__A : List[str] = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
__A : int = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
__A : Optional[Any] = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
__A : Optional[Any] = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__A : Tuple = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
__A : Dict = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def _lowercase ( state_dict , model_state_dict , key_prefix , mapping ):
    """Convert every key of an OpenAI Jukebox ``state_dict`` to the transformers layout.

    Args:
        state_dict: the original checkpoint weights.
        model_state_dict: the target transformers model's state dict (used to
            validate that converted keys exist and shapes match).
        key_prefix: prefix under which this sub-model lives in the full model
            (e.g. ``"vqvae"`` or ``"priors.N"``).
        mapping: dict updated in place with converted-key -> original-key pairs.

    Returns:
        A new dict with the converted keys and original values.

    Note: the original signature declared ``_SCREAMING_SNAKE_CASE`` four times
    (a SyntaxError) and every local was collapsed to ``__A`` while later lines
    read the real names; both are restored from the body's own usage.
    """
    new_dict = {}
    import re

    # Patterns for the three encoder key families (conv-in, resnet, proj-out) ...
    re_encoder_block_conv_in = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_encoder_block_resnet = re.compile(
        r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_encoder_block_proj_out = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    # ... the matching decoder families ...
    re_decoder_block_conv_out = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_decoder_block_resnet = re.compile(
        r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_proj_in = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    # ... and the prior conditioner families.
    re_prior_cond_conv_out = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
    re_prior_cond_resnet = re.compile(
        r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_proj_in = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        # NOTE(review): `replace_key` is the key-rewriting helper defined above
        # (obfuscated in this file as `_lowercase`) — confirm the name resolves.
        key = replace_key(key )

        if f'{key_prefix}.{key}' not in model_state_dict or key is None:
            print(f'failed converting {original_key} to {key}, does not match' )
        # handle missmatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            val = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def _lowercase ( _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Any=None ) -> Any:
    """Download the OpenAI Jukebox checkpoints, remap their state dicts and save a
    transformers JukeboxModel plus a key-mapping report.

    NOTE(review): the parameter `_SCREAMING_SNAKE_CASE` is declared twice (invalid
    Python) and the body reads `model_name` / `pytorch_dump_folder_path`, which are
    never bound — upstream this is convert_openai_checkpoint(model_name,
    pytorch_dump_folder_path); confirm before running.
    """
    # Fetch any missing checkpoint files for this model into the dump folder.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
            __A : int = requests.get(F'{PREFIX}{file}' , allow_redirects=_SCREAMING_SNAKE_CASE )
            os.makedirs(F'{pytorch_dump_folder_path}/' , exist_ok=_SCREAMING_SNAKE_CASE )
            # NOTE(review): the file handle is never closed — a `with` block would be safer.
            open(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , 'wb' ).write(r.content )
    __A : List[str] = MODEL_MAPPING[model_name.split('/' )[-1]]
    __A : Optional[int] = JukeboxConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
    __A : int = JukeboxModel(_SCREAMING_SNAKE_CASE )
    __A : int = []
    __A : Tuple = {}
    # Convert each checkpoint file (vqvae first, then the three priors).
    for i, dict_name in enumerate(_SCREAMING_SNAKE_CASE ):
        __A : List[Any] = torch.load(F'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['model']
        __A : Union[str, Any] = {}
        for k in old_dic.keys():
            if k.endswith('.b' ):
                __A : Any = old_dic[k]
            elif k.endswith('.w' ):
                __A : int = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                __A : Dict = old_dic[k]
            else:
                __A : str = old_dic[k]
        # Sub-model prefix inside the full JukeboxModel.
        __A : Dict = 'vqvae' if i == 0 else F'priors.{3 - i}'
        __A : Dict = fix_jukebox_keys(_SCREAMING_SNAKE_CASE , model.state_dict() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        weight_dict.append(_SCREAMING_SNAKE_CASE )
    __A : str = weight_dict.pop(0 )
    model.vqvae.load_state_dict(_SCREAMING_SNAKE_CASE )
    for i in range(len(_SCREAMING_SNAKE_CASE ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
    # Persist the old->new key mapping next to the converted model for auditing.
    with open(F'{pytorch_dump_folder_path}/mapping.json' , 'w' ) as txtfile:
        json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(_SCREAMING_SNAKE_CASE )
    return weight_dict
if __name__ == "__main__":
    # CLI entry point: parse arguments and convert the requested Jukebox checkpoint.
    # NOTE(review): `parser`, `args` and `convert_openai_checkpoint` are read but never
    # bound under those names in this file (the obfuscation renamed them) — confirm.
    lowerCamelCase : Optional[int] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''jukebox-5b-lyrics''',
        type=str,
        help='''Name of the model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''jukebox-5b-lyrics-converted''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    lowerCamelCase : List[Any] =parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 237 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def lowerCAmelCase_ ( poly , x ):
    """Evaluate a polynomial at ``x``, coefficients given in ascending-power order.

    Args:
        poly: sequence of coefficients ``(c0, c1, ...)`` for ``c0 + c1*x + ...``.
        x: the point at which to evaluate.

    Returns:
        The polynomial's value, computed term by term (O(n) multiplications per
        power via ``x**i``).

    Note: the original signature declared ``a`` twice (a SyntaxError) and the
    body read an undefined ``x``; restored to ``(poly, x)``.
    """
    return sum(c * (x ** i) for i, c in enumerate(poly))
def lowerCAmelCase_ ( poly , x ):
    """Evaluate a polynomial at ``x`` with Horner's method (one multiply-add per term).

    Args:
        poly: sequence of coefficients in ascending-power order.
        x: the point at which to evaluate.

    Returns:
        The polynomial's value as a float.

    Note: the original signature declared ``a`` twice (a SyntaxError) and the
    body read an undefined ``x``; restored to ``(poly, x)``.
    """
    result = 0.0
    # Fold from the highest coefficient down: r = r*x + c.
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    # Demo: evaluate 5x^2 + 9.3x^3 + 7x^4 at x = 10 with both implementations.
    # NOTE(review): `evaluate_poly` / `horner` and `poly` / `x` are read but never bound
    # under those names in this file (the obfuscation renamed them all) — confirm.
    __A : Tuple = (0.0, 0.0, 5.0, 9.3, 7.0)
    __A : List[Any] = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 394 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger and a fixed seed so conversion-time sanity checks are reproducible.
# NOTE(review): every constant below binds the same name `A_` (each assignment shadows
# the previous); upstream they are logger / new_layer_name_dict / REMOTE_MODEL_PATHS /
# CUR_PATH / default_cache_dir / CACHE_DIR — confirm.
A_ = logging.get_logger(__name__)
set_seed(770)
# Map of original Bark layer-name fragments -> transformers layer names.
A_ = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}
# HF Hub location of each Bark sub-model checkpoint (regular and "_small" variants).
A_ = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}
# Local cache location following the XDG convention (~/.cache/suno/bark_v0 by default).
A_ = os.path.dirname(os.path.abspath(__file__))
A_ = os.path.join(os.path.expanduser("~"), ".cache")
A_ = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _UpperCamelCase ( model_type , use_small=False ):
    """Return the local cache path of a Bark sub-model checkpoint.

    Args:
        model_type: one of the keys of the remote-paths table ("text", "coarse",
            "fine"); ``use_small`` selects the ``*_small`` variant.
        use_small: whether to use the small checkpoint.

    Note: the original signature declared ``A`` twice (a SyntaxError) while the
    body read ``model_type``/``key``; the parameters are restored accordingly.
    NOTE(review): joins against the module-level CACHE_DIR / REMOTE_MODEL_PATHS
    constants (obfuscated in this file to `A_`) — confirm they resolve.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _UpperCamelCase ( from_hf_hub , file_name ):
    """Download ``file_name`` from the given HF Hub repo into the local cache dir.

    Note: the original signature declared ``A`` twice (a SyntaxError) and passed
    ``A`` for every argument; restored to (repo id, file name), with the
    module-level CACHE_DIR as destination and ``exist_ok=True``.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_hub, filename=file_name, local_dir=CACHE_DIR)
def _UpperCamelCase ( A , A , A=False , A="text" ):
    """Load one Bark sub-model checkpoint and rebuild it as a transformers Bark* model.

    NOTE(review): the signature declares `A` four times (invalid Python) and the body
    reads names (model_type, use_small, checkpoint, model_args, state_dict, ...) that
    are never bound — upstream this is _load_model(ckpt_path, device, use_small=False,
    model_type="text"); confirm before running.
    """
    # Pick the model / config / generation-config classes for this sub-model type.
    if model_type == "text":
        UpperCamelCase_ =BarkSemanticModel
        UpperCamelCase_ =BarkSemanticConfig
        UpperCamelCase_ =BarkSemanticGenerationConfig
    elif model_type == "coarse":
        UpperCamelCase_ =BarkCoarseModel
        UpperCamelCase_ =BarkCoarseConfig
        UpperCamelCase_ =BarkCoarseGenerationConfig
    elif model_type == "fine":
        UpperCamelCase_ =BarkFineModel
        UpperCamelCase_ =BarkFineConfig
        UpperCamelCase_ =BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    # Download the checkpoint on demand if it is not cached yet.
    UpperCamelCase_ =f"""{model_type}_small""" if use_small else model_type
    UpperCamelCase_ =REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(A ):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
        _download(model_info["repo_id"] , model_info["file_name"] )
    UpperCamelCase_ =torch.load(A , map_location=A )
    # this is a hack
    UpperCamelCase_ =checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        UpperCamelCase_ =model_args["vocab_size"]
        UpperCamelCase_ =model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    UpperCamelCase_ =model_args.pop("n_head" )
    UpperCamelCase_ =model_args.pop("n_embd" )
    UpperCamelCase_ =model_args.pop("n_layer" )
    UpperCamelCase_ =ConfigClass(**checkpoint["model_args"] )
    UpperCamelCase_ =ModelClass(config=A )
    UpperCamelCase_ =GenerationConfigClass()
    UpperCamelCase_ =model_generation_config
    UpperCamelCase_ =checkpoint["model"]
    # fixup checkpoint
    UpperCamelCase_ ="_orig_mod."
    for k, v in list(state_dict.items() ):
        if k.startswith(A ):
            # replace part of the key with corresponding layer name in HF implementation
            UpperCamelCase_ =k[len(A ) :]
            for old_layer_name in new_layer_name_dict:
                UpperCamelCase_ =new_k.replace(A , new_layer_name_dict[old_layer_name] )
            UpperCamelCase_ =state_dict.pop(A )
    # Validate the converted state dict against the fresh model (ignoring attn.bias buffers).
    UpperCamelCase_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
    UpperCamelCase_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
    UpperCamelCase_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
    UpperCamelCase_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
    if len(A ) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""" )
    if len(A ) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""" )
    model.load_state_dict(A , strict=A )
    UpperCamelCase_ =model.num_parameters(exclude_embeddings=A )
    UpperCamelCase_ =checkpoint["best_val_loss"].item()
    logger.info(f"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(A , 3 )} loss""" )
    model.eval()
    model.to(A )
    del checkpoint, state_dict
    return model
def _UpperCamelCase ( A , A=False , A="text" ):
    """Convert one Bark sub-model, verify its outputs against the original
    implementation on random inputs, and save the result.

    NOTE(review): the signature declares `A` three times (invalid Python) and the body
    reads names (model_type, use_small, model, bark_model, batch_size, ...) that are
    never bound — upstream this is load_model(pytorch_dump_folder_path,
    use_small=False, model_type="text"); confirm before running.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    UpperCamelCase_ ="cpu" # do conversion on cpu
    UpperCamelCase_ =_get_ckpt_path(A , use_small=A )
    UpperCamelCase_ =_load_model(A , A , model_type=A , use_small=A )
    # load bark initial model
    UpperCamelCase_ =_bark_load_model(A , "cpu" , model_type=A , use_small=A )
    if model_type == "text":
        UpperCamelCase_ =bark_model["model"]
    if model.num_parameters(exclude_embeddings=A ) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters" )
    # check if same output as the bark model
    UpperCamelCase_ =5
    UpperCamelCase_ =10
    if model_type in ["text", "coarse"]:
        UpperCamelCase_ =torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        UpperCamelCase_ =bark_model(A )[0]
        UpperCamelCase_ =model(A )
        # take last logits
        UpperCamelCase_ =output_new_model_total.logits[:, [-1], :]
    else:
        # The fine model additionally consumes a codebook dimension.
        UpperCamelCase_ =3
        UpperCamelCase_ =8
        UpperCamelCase_ =torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        UpperCamelCase_ =model(A , A )
        UpperCamelCase_ =bark_model(A , A )
        UpperCamelCase_ =output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape" )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal" )
    Path(A ).mkdir(exist_ok=A )
    model.save_pretrained(A )
def _UpperCamelCase ( A , A , A , A , A , A , ):
    """Assemble the three converted Bark sub-models plus the Encodec codec into one
    BarkModel and save (optionally push) it.

    NOTE(review): the signature declares `A` six times (invalid Python) and the body
    reads names (semantic, coarseAcoustic, fineAcoustic, codec, bark, ...) that are
    never bound — the obfuscation destroyed the parameter list (upstream: model_path
    plus the three sub-model folder names, repo id, push flag); confirm.
    """
    UpperCamelCase_ =os.path.join(A , A )
    # Load the per-sub-model configs saved by the earlier conversion steps.
    UpperCamelCase_ =BarkSemanticConfig.from_pretrained(os.path.join(A , "config.json" ) )
    UpperCamelCase_ =BarkCoarseConfig.from_pretrained(os.path.join(A , "config.json" ) )
    UpperCamelCase_ =BarkFineConfig.from_pretrained(os.path.join(A , "config.json" ) )
    UpperCamelCase_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
    UpperCamelCase_ =BarkSemanticModel.from_pretrained(A )
    UpperCamelCase_ =BarkCoarseModel.from_pretrained(A )
    UpperCamelCase_ =BarkFineModel.from_pretrained(A )
    UpperCamelCase_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
    # Compose the full-model config and generation config from the sub-configs.
    UpperCamelCase_ =BarkConfig.from_sub_model_configs(
        A , A , A , A )
    UpperCamelCase_ =BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    UpperCamelCase_ =BarkModel(A )
    UpperCamelCase_ =semantic
    UpperCamelCase_ =coarseAcoustic
    UpperCamelCase_ =fineAcoustic
    UpperCamelCase_ =codec
    UpperCamelCase_ =bark_generation_config
    Path(A ).mkdir(exist_ok=A )
    bark.save_pretrained(A , repo_id=A , push_to_hub=A )
if __name__ == "__main__":
    # CLI entry point: convert one Bark sub-model checkpoint.
    # NOTE(review): `parser`, `args` and `load_model` are read but never bound under
    # those names in this file (the obfuscation renamed them) — confirm.
    A_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    A_ = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 391 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A = None
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__A = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
__A = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
__A = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = ["input_ids", "attention_mask"]
A_ = MBartTokenizer
A_ = []
A_ = []
def __init__( self: str , __A: Optional[int]=None , __A: List[str]=None , __A: Optional[int]="<s>" , __A: Tuple="</s>" , __A: List[str]="</s>" , __A: Tuple="<s>" , __A: Dict="<unk>" , __A: Optional[Any]="<pad>" , __A: str="<mask>" , __A: List[Any]=None , __A: Any=None , __A: Union[str, Any]=None , **__A: int , ) -> str:
# Mask token behave like a normal word, i.e. include the space before it
_A = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
vocab_file=__A , tokenizer_file=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , src_lang=__A , tgt_lang=__A , additional_special_tokens=__A , **__A , )
_A = vocab_file
_A = False if not self.vocab_file else True
_A = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
_A = {
lang_code: self.convert_tokens_to_ids(__A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A = src_lang if src_lang is not None else '''en_XX'''
_A = self.convert_tokens_to_ids(self._src_lang )
_A = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __A ( self: Union[str, Any] ) -> str:
return self._src_lang
@src_lang.setter
def __A ( self: List[Any] , __A: str ) -> None:
_A = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __A ( self: Union[str, Any] , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __A ( self: str , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self: List[Any] , __A: List[Any] , __A: str , __A: Optional[str] , __A: Optional[str] , **__A: Any ) -> Any:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_A = src_lang
_A = self(__A , add_special_tokens=__A , return_tensors=__A , **__A )
_A = self.convert_tokens_to_ids(__A )
_A = tgt_lang_id
return inputs
def __A ( self: List[str] , __A: List[str] , __A: str = "en_XX" , __A: Optional[List[str]] = None , __A: str = "ro_RO" , **__A: int , ) -> BatchEncoding:
    """Record src/tgt languages, then delegate to the parent seq2seq batch helper."""
    # NOTE(review): duplicate `__A` parameters (SyntaxError); the assignments to
    # `_A` discard the language codes instead of setting self.src_lang /
    # self.tgt_lang — mangled source.
    _A = src_lang
    _A = tgt_lang
    return super().prepare_seqaseq_batch(__A , __A , **__A )
def __A ( self: List[Any] ) -> Any:
    """Switch the tokenizer into source-language (encoding) mode."""
    return self.set_src_lang_special_tokens(self.src_lang )
def __A ( self: Optional[Any] ) -> Dict:
    """Switch the tokenizer into target-language (decoding) mode."""
    return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __A ( self: Tuple , __A: List[Any] ) -> None:
    """Reset the special tokens for a source language.

    Intended layout: no prefix, suffix = [eos, src_lang_code].
    """
    _A = self.convert_tokens_to_ids(__A )
    _A = []
    _A = [self.eos_token_id, self.cur_lang_code]
    _A = self.convert_ids_to_tokens(self.prefix_tokens )
    _A = self.convert_ids_to_tokens(self.suffix_tokens )
    # Rebuild the fast tokenizer's post-processor so encoded sequences get the
    # language-specific special tokens appended.
    _A = processors.TemplateProcessing(
        single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    # NOTE(review): every assignment above targets `_A`, so `self.cur_lang_code`,
    # `self.prefix_tokens`, `prefix_tokens_str`, etc. are never actually bound —
    # mangled source; compare with the original tokenizer before relying on this.
def __A ( self: Optional[int] , __A: str ) -> None:
    """Reset the special tokens for a target language.

    Intended layout: no prefix, suffix = [eos, tgt_lang_code].
    """
    _A = self.convert_tokens_to_ids(__A )
    _A = []
    _A = [self.eos_token_id, self.cur_lang_code]
    _A = self.convert_ids_to_tokens(self.prefix_tokens )
    _A = self.convert_ids_to_tokens(self.suffix_tokens )
    # Rebuild the fast tokenizer's post-processor for target-side decoding.
    _A = processors.TemplateProcessing(
        single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    # NOTE(review): all assignments above target `_A`, so the names read later
    # (`self.cur_lang_code`, `prefix_tokens_str`, ...) are never bound — mangled.
def __A ( self: List[str] , __A: str , __A: Optional[str] = None ) -> Tuple[str]:
    """Copy the slow-tokenizer vocabulary file into the given save directory.

    Returns a 1-tuple with the written path; logs an error and returns None if
    the target is not a directory. Raises ValueError when this fast tokenizer
    has no underlying sentencepiece file to copy.
    """
    # NOTE(review): duplicate `__A` parameters (SyntaxError); `save_directory`,
    # `filename_prefix` and `out_vocab_file` are read but never bound under
    # these names — mangled source.
    if not self.can_save_slow_tokenizer:
        raise ValueError(
            '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
            '''tokenizer.''' )
    if not os.path.isdir(__A ):
        logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
        return
    _A = os.path.join(
        __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
    if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
        copyfile(self.vocab_file , __A )
    return (out_vocab_file,)
| 712 |
def __A ( _lowercase = 1_00_00_00 ):
'''simple docstring'''
_A = 1
_A = 1
_A = {1: 1}
for inputa in range(2 , _lowercase ):
_A = 0
_A = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_A = (3 * number) + 1
counter += 1
if inputa not in counters:
_A = counter
if counter > pre_counter:
_A = inputa
_A = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 62 | 0 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
# Standard 256x256 RGB preprocessing used by the `preprocess` helper below:
# resize, convert to a [0, 1] tensor, then rescale pixel values to [-1, 1].
# Bug fix: this pipeline was bound to `lowercase`, but the preprocessing helper
# references it as `trans`, which was undefined (NameError).
trans = transforms.Compose(
    [
        transforms.Resize((2_5_6, 2_5_6)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
# Backwards-compatible alias for the previous binding.
lowercase = trans
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if isinstance(UpperCamelCase__, torch.Tensor ):
return image
elif isinstance(UpperCamelCase__, PIL.Image.Image ):
UpperCamelCase__ = [image]
UpperCamelCase__ = [trans(img.convert('''RGB''' ) ) for img in image]
UpperCamelCase__ = torch.stack(UpperCamelCase__ )
return image
class __lowercase ( A ):
    '''DDIM-based image-to-image diffusion pipeline: noises a user-provided
    image to an intermediate timestep (controlled by `strength`) and denoises
    it back with the UNet.

    NOTE(review): several parameter/local names in this class look machine-
    mangled (duplicate `_a` parameters — a SyntaxError — and reads of
    `scheduler`/`strength`/`image`/`latents` that are never bound); verify
    against the original pipeline source before relying on any of them.
    '''

    def __init__( self : List[str] , _a : int , _a : List[str] ):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        UpperCamelCase__ = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=_a , scheduler=_a )

    def A_ ( self : str , _a : int ):
        # Validate the `strength` argument of __call__ (must lie in [0, 1]).
        if strength < 0 or strength > 1:
            raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )

    def A_ ( self : str , _a : Union[str, Any] , _a : Any , _a : Any ):
        # get the original timestep using init_timestep
        UpperCamelCase__ = min(int(num_inference_steps * strength ) , _a )
        UpperCamelCase__ = max(num_inference_steps - init_timestep , 0 )
        UpperCamelCase__ = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def A_ ( self : Optional[Any] , _a : List[Any] , _a : Optional[Any] , _a : Dict , _a : str , _a : Dict , _a : str=None ):
        # Encode the preprocessed image into initial latents, then add scheduler
        # noise at the requested timestep.
        if not isinstance(_a , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_a )}""" )
        UpperCamelCase__ = image.to(device=_a , dtype=_a )
        if isinstance(_a , _a ) and len(_a ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(_a )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        UpperCamelCase__ = init_latents.shape
        UpperCamelCase__ = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
        # get latents
        print('''add noise to latents at timestep''' , _a )
        UpperCamelCase__ = self.scheduler.add_noise(_a , _a , _a )
        UpperCamelCase__ = init_latents
        return latents

    @torch.no_grad()
    def __call__( self : Any , _a : Union[torch.FloatTensor, PIL.Image.Image] = None , _a : float = 0.8 , _a : int = 1 , _a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _a : float = 0.0 , _a : int = 50 , _a : Optional[bool] = None , _a : Optional[str] = "pil" , _a : bool = True , ):
        # Full img2img sampling loop; returns an ImagePipelineOutput (or a raw
        # tuple when return_dict is falsy).
        self.check_inputs(_a )
        # 2. Preprocess image
        UpperCamelCase__ = preprocess(_a )
        # 3. set timesteps
        self.scheduler.set_timesteps(_a , device=self.device )
        UpperCamelCase__ , UpperCamelCase__ = self.get_timesteps(_a , _a , self.device )
        UpperCamelCase__ = timesteps[:1].repeat(_a )
        # 4. Prepare latent variables
        UpperCamelCase__ = self.prepare_latents(_a , _a , _a , self.unet.dtype , self.device , _a )
        UpperCamelCase__ = latents
        # 5. Denoising loop
        for t in self.progress_bar(_a ):
            # 1. predict noise model_output
            UpperCamelCase__ = self.unet(_a , _a ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            UpperCamelCase__ = self.scheduler.step(
                _a , _a , _a , eta=_a , use_clipped_model_output=_a , generator=_a , ).prev_sample
        # Map latent range [-1, 1] back to image range [0, 1], then to HWC numpy.
        UpperCamelCase__ = (image / 2 + 0.5).clamp(0 , 1 )
        UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            UpperCamelCase__ = self.numpy_to_pil(_a )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=_a )
import argparse
import os
import re
import packaging.version
# Paths and regex patterns used by the release helpers below.
# Bug fix: these four constants were all bound to the single name `lowercase`,
# clobbering one another and leaving REPLACE_PATTERNS / REPLACE_FILES /
# README_FILE (which the functions below read) undefined at runtime.
PATH_TO_EXAMPLES = "examples/"

# pattern name -> (compiled regex locating the version string, replacement template)
REPLACE_PATTERNS = {
    "examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
    "doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}

# pattern name -> file carrying the canonical version string
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"

# Backwards-compatible alias: `lowercase` previously ended up holding this value.
lowercase = README_FILE
def lowerCamelCase_ ( fname, version, pattern ):
    '''Rewrite the version string in `fname` using the regex/template registered
    under `pattern` in REPLACE_PATTERNS.

    Bug fix: the original signature declared three parameters all named
    `UpperCamelCase__` (a SyntaxError in Python); conventional names restored.
    '''
    with open(fname, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    # Inject the concrete version into the replacement template, then rewrite.
    replace = replace.replace('''VERSION''', version )
    code = re_pattern.sub(replace, code )
    with open(fname, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
        f.write(code )


# Sibling helpers in this file call this function by its conventional name.
update_version_in_file = lowerCamelCase_
def lowerCamelCase_ ( version ):
    '''Pin `version` in every example script under PATH_TO_EXAMPLES.

    Bug fix: the original walked its *argument* (the version string) instead of
    the examples directory, and forwarded mangled arguments to
    `update_version_in_file`.
    '''
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder, fname ), version, pattern='''examples''' )


# Sibling helpers in this file call this function by its conventional name.
update_version_in_examples = lowerCamelCase_
def lowerCamelCase_ ( version, patch=False ):
    '''Update the version in every registered file; unless this is a patch
    release, also pin it in the example scripts.

    Bug fix: the original declared two parameters both named `UpperCamelCase__`
    (a SyntaxError) and forwarded the wrong values to the helpers.
    '''
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern )
    if not patch:
        update_version_in_examples(version )


# The main guard and pre/post release helpers use the conventional name.
global_version_update = lowerCamelCase_
def lowerCamelCase_ ( ):
    '''Point model-doc links in the README at the stable docs instead of `main`.

    Bug fix: `_start_prompt` and `_end_prompt` were both assigned to the same
    throwaway name and then read as unbound names (NameError), and the README
    path was the undefined `UpperCamelCase__`.
    '''
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    # README_FILE is the module-level constant (conventionally "README.md").
    with open(README_FILE, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/diffusers/main/model_doc''', '''https://huggingface.co/docs/diffusers/model_doc''', )
        index += 1
    with open(README_FILE, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
        f.writelines(lines )


# Conventional name used by release tooling.
clean_main_ref_in_model_list = lowerCamelCase_
def lowerCamelCase_ ( ):
    '''Read the current package version from the init file listed in REPLACE_FILES
    and return it as a `packaging.version.Version`.
    '''
    with open(REPLACE_FILES['''init'''], '''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )


# Consistency fix: the pre/post release helpers below call this as `get_version`,
# which was previously undefined (every def in this file was renamed to
# `lowerCamelCase_`, each clobbering the last).
get_version = lowerCamelCase_
def lowerCamelCase_ ( patch=False ):
    '''Compute the next release version (minor bump, or micro bump for a patch),
    confirm it interactively, and write it everywhere.

    Bug fix: `patch`, `default_version` and `version` were read but never bound
    (all assignments targeted the same throwaway name). The parameter is named
    `patch` because the main guard calls `pre_release_work(patch=args.patch)`.
    '''
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = F"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(F"""Which version are you releasing? [{default_version}]""" )
    if len(version ) == 0:
        version = default_version
    print(F"""Updating version to {version}.""" )
    global_version_update(version, patch=patch )


# Conventional name used by the main guard.
pre_release_work = lowerCamelCase_
def lowerCamelCase_ ( ):
    '''Bump to the next dev version (X.(Y+1).0.dev0) after a release, with an
    interactive confirmation.

    Bug fix: `current_version`, `dev_version` and `version` were read but never
    bound (all assignments targeted the same throwaway name).
    '''
    current_version = get_version()
    dev_version = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F"""Which version are we developing now? [{dev_version}]""" )
    if len(version ) == 0:
        version = dev_version
    print(F"""Updating version to {version}.""" )
    global_version_update(version )
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


# Conventional name used by the main guard.
post_release_work = lowerCamelCase_
if __name__ == "__main__":
    # CLI entry point: `--post_release` performs the post-release dev-version
    # bump; `--patch` does a micro-version bump without touching the examples.
    lowercase = argparse.ArgumentParser()
    parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
    parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    lowercase = parser.parse_args()
    # NOTE(review): the parser is bound to `lowercase` but used as `parser`, and
    # the parsed args are bound to `lowercase` but read as `args` — mangled
    # names; as written this guard raises NameError.
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("""Nothing to do after a patch :-)""")
    else:
        post_release_work()
| 240 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Builds miniature Flaubert configs plus synthetic inputs, and runs the
    per-head shape/loss checks used by the test suite below.

    NOTE(review): this class appears machine-mangled — every ``__init__``
    parameter is named ``a_`` (a SyntaxError), and locals are bound to
    ``lowerCamelCase_`` but then read under their conventional names
    (``model``, ``result``, ``config_and_inputs``, ...). Verify against the
    original test file before relying on any of it.
    """

    def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=False , a_=False , a_=False , a_=2 , a_=99 , a_=0 , a_=32 , a_=5 , a_=4 , a_=0.1 , a_=0.1 , a_=512 , a_=12 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_="last" , a_=None , a_=None , ):
        # Store the (tiny) model hyper-parameters used to build configs/inputs.
        lowerCamelCase_ : str = parent
        lowerCamelCase_ : List[str] = batch_size
        lowerCamelCase_ : List[Any] = seq_length
        lowerCamelCase_ : int = is_training
        lowerCamelCase_ : Optional[int] = use_input_lengths
        lowerCamelCase_ : Optional[Any] = use_token_type_ids
        lowerCamelCase_ : Dict = use_labels
        lowerCamelCase_ : Dict = gelu_activation
        lowerCamelCase_ : Optional[int] = sinusoidal_embeddings
        lowerCamelCase_ : str = causal
        lowerCamelCase_ : Optional[int] = asm
        lowerCamelCase_ : Dict = n_langs
        lowerCamelCase_ : List[Any] = vocab_size
        lowerCamelCase_ : List[Any] = n_special
        lowerCamelCase_ : int = hidden_size
        lowerCamelCase_ : List[str] = num_hidden_layers
        lowerCamelCase_ : List[Any] = num_attention_heads
        lowerCamelCase_ : Optional[int] = hidden_dropout_prob
        lowerCamelCase_ : Union[str, Any] = attention_probs_dropout_prob
        lowerCamelCase_ : int = max_position_embeddings
        lowerCamelCase_ : int = type_vocab_size
        lowerCamelCase_ : Optional[int] = type_sequence_label_size
        lowerCamelCase_ : int = initializer_range
        lowerCamelCase_ : Tuple = num_labels
        lowerCamelCase_ : int = num_choices
        lowerCamelCase_ : List[Any] = summary_type
        lowerCamelCase_ : Optional[Any] = use_proj
        lowerCamelCase_ : List[Any] = scope

    def _UpperCamelCase ( self ):
        # Build random ids/masks/labels for one synthetic batch.
        lowerCamelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ : List[Any] = None
        if self.use_input_lengths:
            lowerCamelCase_ : Optional[int] = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            ) # small variation of seq_length
        lowerCamelCase_ : List[str] = None
        if self.use_token_type_ids:
            lowerCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        lowerCamelCase_ : int = None
        lowerCamelCase_ : int = None
        lowerCamelCase_ : Optional[int] = None
        if self.use_labels:
            lowerCamelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase_ : List[str] = ids_tensor([self.batch_size] , 2 ).float()
            lowerCamelCase_ : int = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase_ : Tuple = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def _UpperCamelCase ( self ):
        # Tiny FlaubertConfig built from the tester's hyper-parameters.
        return FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # Base model: check the last-hidden-state shape.
        lowerCamelCase_ : Any = FlaubertModel(config=a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Union[str, Any] = model(a_ , lengths=a_ , langs=a_ )
        lowerCamelCase_ : List[str] = model(a_ , langs=a_ )
        lowerCamelCase_ : List[str] = model(a_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # LM head: scalar loss and vocab-sized logits.
        lowerCamelCase_ : Optional[Any] = FlaubertWithLMHeadModel(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : str = model(a_ , token_type_ids=a_ , labels=a_ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # Simple QA head: per-token start/end logits.
        lowerCamelCase_ : Tuple = FlaubertForQuestionAnsweringSimple(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Dict = model(a_ )
        lowerCamelCase_ : List[Any] = model(a_ , start_positions=a_ , end_positions=a_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # Beam-search QA head: top-k start/end probabilities and cls logits.
        lowerCamelCase_ : List[str] = FlaubertForQuestionAnswering(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Dict = model(a_ )
        lowerCamelCase_ : Any = model(
            a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , p_mask=a_ , )
        lowerCamelCase_ : Any = model(
            a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , )
        (lowerCamelCase_) : Union[str, Any] = result_with_labels.to_tuple()
        lowerCamelCase_ : int = model(a_ , start_positions=a_ , end_positions=a_ )
        (lowerCamelCase_) : Dict = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # Sequence classification head.
        lowerCamelCase_ : Optional[int] = FlaubertForSequenceClassification(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Tuple = model(a_ )
        lowerCamelCase_ : Tuple = model(a_ , labels=a_ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # Token classification head.
        lowerCamelCase_ : int = self.num_labels
        lowerCamelCase_ : List[Any] = FlaubertForTokenClassification(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Tuple = model(a_ , attention_mask=a_ , labels=a_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # Multiple choice head: inputs are expanded along a new choices dim.
        lowerCamelCase_ : Union[str, Any] = self.num_choices
        lowerCamelCase_ : int = FlaubertForMultipleChoice(config=a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : Dict = model(
            a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _UpperCamelCase ( self ):
        # Pack the generated config/inputs into the kwargs dict used by the
        # common model tests.
        lowerCamelCase_ : Optional[int] = self.prepare_config_and_inputs()
        (
            lowerCamelCase_
        ) : Dict = config_and_inputs
        lowerCamelCase_ : str = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Standard ModelTester/PipelineTester suite for the Flaubert PyTorch models.

    NOTE(review): several signatures below declare duplicate ``a_`` parameters
    (a SyntaxError) and locals are bound to ``lowerCamelCase_`` but read under
    conventional names (``inputs_dict``, ``config_and_inputs``, ...) — mangled
    source; verify against the original test file.
    """

    # All model classes exercised by the common tests (torch-only).
    __UpperCAmelCase : Optional[int] = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # pipeline task -> model class mapping for the pipeline tests.
    __UpperCAmelCase : Dict = (
        {
            '''feature-extraction''': FlaubertModel,
            '''fill-mask''': FlaubertWithLMHeadModel,
            '''question-answering''': FlaubertForQuestionAnsweringSimple,
            '''text-classification''': FlaubertForSequenceClassification,
            '''token-classification''': FlaubertForTokenClassification,
            '''zero-shot''': FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ ):
        # Skip QA pipeline tests when a slow tokenizer would be used.
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _UpperCamelCase ( self , a_ , a_ , a_=False ):
        # Add dummy start/end position labels for the beam-search QA head.
        lowerCamelCase_ : List[str] = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                lowerCamelCase_ : List[str] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=a_ )
                lowerCamelCase_ : str = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=a_ )
        return inputs_dict

    def _UpperCamelCase ( self ):
        # setUp: build the model tester and config tester.
        lowerCamelCase_ : int = FlaubertModelTester(self )
        lowerCamelCase_ : Optional[int] = ConfigTester(self , config_class=a_ , emb_dim=37 )

    def _UpperCamelCase ( self ):
        self.config_tester.run_common_tests()

    def _UpperCamelCase ( self ):
        lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*a_ )

    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*a_ )

    def _UpperCamelCase ( self ):
        lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*a_ )

    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*a_ )

    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*a_ )

    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*a_ )

    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*a_ )

    @slow
    def _UpperCamelCase ( self ):
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ : List[Any] = FlaubertModel.from_pretrained(a_ )
            self.assertIsNotNone(a_ )

    @slow
    @require_torch_gpu
    def _UpperCamelCase ( self ):
        # Trace each model with TorchScript, save/reload it, and run it on GPU.
        lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            lowerCamelCase_ : List[str] = True
            lowerCamelCase_ : Dict = model_class(config=a_ )
            lowerCamelCase_ : Tuple = self._prepare_for_class(a_ , a_ )
            lowerCamelCase_ : List[Any] = torch.jit.trace(
                a_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(a_ , os.path.join(a_ , "traced_model.pt" ) )
                lowerCamelCase_ : Dict = torch.jit.load(os.path.join(a_ , "traced_model.pt" ) , map_location=a_ )
                loaded(inputs_dict["input_ids"].to(a_ ) , inputs_dict["attention_mask"].to(a_ ) )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: checks FlaubertModel hidden states against golden values."""

    @slow
    def _UpperCamelCase ( self ):
        # NOTE(review): `model`, `output` and the `a_` arguments below are never
        # bound under these names (all assignments target `lowerCamelCase_`) —
        # mangled source; verify against the original integration test.
        lowerCamelCase_ : List[Any] = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
        lowerCamelCase_ : List[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        with torch.no_grad():
            lowerCamelCase_ : List[Any] = model(a_ )[0]
        lowerCamelCase_ : List[str] = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , a_ )
        lowerCamelCase_ : Union[str, Any] = torch.tensor(
            [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1E-4 ) )
| 717 |
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
    """BuilderConfig for the Spark dataset builder below."""
    # Optional user-declared feature schema; inferred from the DataFrame if None.
    __UpperCAmelCase : Optional[datasets.Features] = None
def __magic_name__ ( df , partition_order , ):
    '''Return a zero-argument generator factory that replays *df* one Spark
    partition at a time, in `partition_order`, yielding
    ("<partition>_<row>", row-dict) pairs.

    Bug fix: the original signature declared both parameters as
    `lowerCAmelCase_` (a SyntaxError), and the internal locals were all bound
    to the same throwaway name, leaving `df_with_partition_id`, `partition_df`,
    `rows` and `row_id` unbound; conventional names restored.
    '''
    import pyspark

    def generate_fn():
        # Tag every row with its partition id so partitions can be replayed
        # individually in the requested order.
        df_with_partition_id = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(F"""part_id = {partition_id}""").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield F"""{partition_id}_{row_id}""", row.asDict()
                row_id += 1

    return generate_fn


# `SparkExamplesIterable.__init__` refers to this helper by its conventional name.
_generate_iterable_examples = __magic_name__
class lowerCAmelCase__ ( _BaseExamplesIterable ):
    """Examples iterable that replays a Spark DataFrame partition-by-partition
    in a fixed `partition_order`, yielding (id, row-dict) pairs.

    NOTE(review): mangled source — the shuffle method reads `generator`, which
    is never bound (its parameter is `a_`); the sharding method declares
    duplicate `a_` parameters (a SyntaxError) and calls
    `self.split_shard_indices_by_worker`, which is not defined in this view.
    """

    def __init__( self , a_ , a_=None , ):
        # Default replay order is the DataFrame's natural partition order.
        lowerCamelCase_ : Dict = df
        lowerCamelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
        lowerCamelCase_ : int = _generate_iterable_examples(self.df , self.partition_order )

    def __iter__( self ):
        yield from self.generate_examples_fn()

    def _UpperCamelCase ( self , a_ ):
        # Shuffle the partition replay order with the supplied RNG.
        lowerCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(a_ )
        return SparkExamplesIterable(self.df , partition_order=a_ )

    def _UpperCamelCase ( self , a_ , a_ ):
        # Keep only the partitions assigned to this worker shard.
        lowerCamelCase_ : Dict = self.split_shard_indices_by_worker(a_ , a_ )
        return SparkExamplesIterable(self.df , partition_order=a_ )

    @property
    def _UpperCamelCase ( self ):
        # Number of shards == number of partitions being replayed.
        return len(self.partition_order )
class lowerCAmelCase__ ( datasets.DatasetBuilder ):
    """DatasetBuilder that materializes a Spark DataFrame into Arrow/Parquet shards."""
    # Builder-config class for this builder (attribute name appears mangled;
    # conventionally BUILDER_CONFIG_CLASS).
    __UpperCAmelCase : Any = SparkConfig
def __init__( self , a_ , a_ = None , a_ = None , **a_ , ):
    """Create the builder from a Spark DataFrame (plus optional cache/working dirs).

    NOTE(review): the three positional parameters all share the name `a_`
    (a SyntaxError) and the body reads `df` / `working_dir`, which are never
    bound — mangled from (df, cache_dir, working_dir)-style arguments.
    The config name is derived from the DataFrame's semantic hash so identical
    queries share a cache entry.
    """
    import pyspark

    lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
    lowerCamelCase_ : Optional[Any] = df
    lowerCamelCase_ : List[Any] = working_dir
    super().__init__(
        cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , )
def _UpperCamelCase ( self ):
    """Ensure the configured cache directory is usable from every Spark worker.

    On a non-local cluster the cache dir must live on shared storage (e.g. NFS)
    visible to both driver and executors; verify by writing a probe file from a
    worker and checking that the driver can see it. Raises ValueError otherwise.

    Bug fixes: `uuid.uuida()` does not exist (restored to `uuid.uuid4()`); the
    probe result was bound to a throwaway name but read as `probe`; and the
    worker callable was passed as the undefined `a_` instead of the local
    `create_cache_and_write_probe`.
    """

    # Returns the path of the created file.
    def create_cache_and_write_probe(context):
        # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
        # already exist.
        os.makedirs(self._cache_dir , exist_ok=True )
        probe_file = os.path.join(self._cache_dir , "fs_test" + uuid.uuid4().hex )
        # Opening the file in append mode will create a new file unless it already exists, in which case it will not
        # change the file contents.
        open(probe_file , "a" )
        return [probe_file]

    if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
        return
    # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
    # accessible to the driver.
    # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
    if self._cache_dir:
        probe = (
            self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
        )
        if os.path.isfile(probe[0] ):
            return
    raise ValueError(
        "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def _UpperCamelCase ( self ):
    """Dataset metadata: expose the (optionally user-supplied) feature schema."""
    declared_features = self.config.features
    return datasets.DatasetInfo(features=declared_features )
def _UpperCamelCase ( self , a_ ):
    """A single TRAIN split covers the whole DataFrame."""
    train_split = datasets.SplitGenerator(name=datasets.Split.TRAIN )
    return [train_split]
def _UpperCamelCase ( self , a_ ):
    """Repartition ``self.df`` so each output shard stays under ``a_`` bytes.

    The average Arrow-encoded row size is estimated from a sample of at most
    100 rows; if the projected total exceeds the shard limit, the DataFrame is
    repartitioned (with at least one row per partition).

    Bug fixes: every local was previously bound to a throwaway name, leaving
    `df_num_rows`, `sample_num_rows`, `approx_bytes_per_row`,
    `approx_total_size` and the partition count unbound (NameError), and the
    repartitioned DataFrame was discarded instead of replacing ``self.df``.
    """
    import pyspark

    def get_arrow_batch_size(it):
        # Yield one single-column record batch per incoming Arrow batch,
        # carrying that batch's byte size.
        for batch in it:
            yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )

    df_num_rows = self.df.count()
    sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
    # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
    approx_bytes_per_row = (
        self.df.limit(sample_num_rows )
        .repartition(1 )
        .mapInArrow(get_arrow_batch_size , "batch_bytes: long" )
        .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
        .collect()[0]
        .sample_bytes
        / sample_num_rows
    )
    approx_total_size = approx_bytes_per_row * df_num_rows
    if approx_total_size > a_:
        # Make sure there is at least one row per partition.
        new_num_partitions = min(df_num_rows , int(approx_total_size / a_ ) )
        self.df = self.df.repartition(new_num_partitions )
def _UpperCamelCase ( self , a_ , a_ , a_ , ):
    """Write the DataFrame out as Arrow/Parquet shard files, one Spark task at a
    time, yielding (task_id, (num_examples, num_bytes, num_shards, shard_lengths))
    statistics per task.

    NOTE(review): mangled source — the three parameters share the name `a_`
    (a SyntaxError; conventionally fpath, file_format, max_shard_size), locals
    are bound to `lowerCamelCase_` but read as `writer_class` / `working_fpath`
    / `writer` / `stats` etc., and `shutil` is used below without being imported
    at the top of the file. Verify against the original builder before use.
    """
    import pyspark

    lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter
    # Stage shards in the working dir (if any) before the final rename.
    lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath
    lowerCamelCase_ : Optional[Any] = file_format == "parquet"
    # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
    # pickling the SparkContext.
    lowerCamelCase_ : int = self.config.features
    lowerCamelCase_ : Any = self._writer_batch_size
    lowerCamelCase_ : Tuple = self._fs.storage_options

    def write_arrow(a_ ):
        # Runs on each Spark task: streams Arrow batches into shard files,
        # rolling over to a new shard when max_shard_size is exceeded, and
        # yields one stats record batch per finished shard.
        # Within the same SparkContext, no two task attempts will share the same attempt ID.
        lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
        lowerCamelCase_ : Optional[int] = next(a_ , a_ )
        if first_batch is None:
            # Some partitions might not receive any data.
            return pa.RecordBatch.from_arrays(
                [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
        lowerCamelCase_ : List[Any] = 0
        lowerCamelCase_ : Optional[int] = writer_class(
            features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
        lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
        writer.write_table(a_ )
        for batch in it:
            if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                # Current shard is full: finalize it and start the next one.
                lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
                shard_id += 1
                lowerCamelCase_ : List[str] = writer_class(
                    features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
            lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] )
            writer.write_table(a_ )
        if writer._num_bytes > 0:
            # Finalize the trailing (possibly partial) shard.
            lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize()
            writer.close()
            yield pa.RecordBatch.from_arrays(
                [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
        if working_fpath != fpath:
            # Move staged shard files from the working dir to their destination.
            for file in os.listdir(os.path.dirname(a_ ) ):
                lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) )
                shutil.move(a_ , a_ )

    # Aggregate the per-shard stats emitted by the workers into per-task totals.
    lowerCamelCase_ : int = (
        self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" )
        .groupBy("task_id" )
        .agg(
            pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
        .collect()
    )
    for row in stats:
        yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _UpperCamelCase ( self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ):
self._validate_cache_dir()
lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a_ )
lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs )
lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join
lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN"
lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
lowerCamelCase_ : int = path_join(self._output_dir , a_ )
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : int = 0
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Any = []
for task_id, content in self._prepare_split_single(a_ , a_ , a_ ):
(
(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,
) : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a_ )
lowerCamelCase_ : Dict = total_num_examples
lowerCamelCase_ : Any = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
lowerCamelCase_ : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase_ : Any = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a_ , a_ , a_ , ):
rename(
a_ , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , )
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : Dict = 0
for i in range(len(a_ ) ):
lowerCamelCase_ ,lowerCamelCase_ : Tuple = task_id_and_num_shards[i]
for shard_id in range(a_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a_ , len(a_ ) ).map(lambda a_ : _rename_shard(*a_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[int] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , )
def _UpperCamelCase ( self , a_ , ):
return SparkExamplesIterable(self.df )
| 73 | 0 |
def z_function(input_str):
    """Compute the Z-array of ``input_str``.

    z[i] is the length of the longest substring starting at i that is also a
    prefix of the string; z[0] is left at 0 by this implementation's convention.

    >>> z_function("abracadabra")
    [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    """
    z_result = [0 for _ in range(len(input_str))]
    # [left_pointer, right_pointer] is the rightmost interval known to match a prefix.
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval: reuse earlier values
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        # Extend the match naively (the original's `go_next` condition, inlined
        # so this function is self-contained).
        while i + z_result[i] < len(input_str) and input_str[z_result[i]] == input_str[i + z_result[i]]:
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i, z_result, s):
    """Return True while the naive Z-extension at position ``i`` can continue.

    True iff the candidate character is in bounds and still matches the
    corresponding prefix character. (The obfuscated signature repeated one
    parameter name three times — a SyntaxError; names restored from the body
    and the call site in ``z_function``.)
    """
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern, input_str):
    """Count occurrences of ``pattern`` in ``input_str`` via the Z-algorithm.

    Concatenates ``pattern + input_str`` and counts Z-values that are at least
    ``len(pattern)``. NOTE(review): no separator is inserted between the two
    strings, so matches straddling the junction can be over-counted — this
    mirrors the original implementation.
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    # (Removed the trailing "| 39 |" dataset-row residue that made this a SyntaxError.)
    import doctest

    doctest.testmod()
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
SCREAMING_SNAKE_CASE_ = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text, n=100, character=" ") -> List[str]:
    """Split ``text`` into chunks of ``n`` words, words joined by ``character``.

    NOTE(review): the obfuscated signature reused one parameter name (a
    SyntaxError); names restored from the call site in ``split_documents``.
    """
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]
def split_documents(documents) -> dict:
    """Split a ``{"title": [...], "text": [...]}`` batch into 100-word passages.

    Each passage keeps its document's title; missing titles become "" so the
    two output columns stay aligned. Documents with ``None`` text are dropped.
    """
    titles, texts = [], []
    for title, text in zip(documents['title'], documents['text']):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else '')
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents, ctx_encoder, ctx_tokenizer) -> dict:
    """Compute DPR embeddings for a batch of title/text documents.

    Returns ``{"embeddings": ndarray}`` with one row per passage.
    """
    # Device computed locally so this function does not rely on the (mangled)
    # module-level constant.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    input_ids = ctx_tokenizer(
        documents['title'], documents['text'], truncation=True, padding='longest', return_tensors='pt')['input_ids']
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args, processing_args, index_hnsw_args):
    """Build a DPR-embedded knowledge dataset from a tab-separated csv and index it with Faiss."""
    logger.info('Step 1 - Create the dataset')
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        'csv', data_files=[rag_example_args.csv_path], split='train', delimiter='\t', column_names=['title', 'text'])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {'text': Value('string'), 'title': Value('string'), 'embeddings': Sequence(Value('float32'))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset')
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info('Step 2 - Index the dataset')
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index('embeddings', custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset_hnsw_index.faiss')
    dataset.get_index('embeddings').save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    """Command-line arguments for the RAG knowledge-dataset example.

    Field names restored to match the attribute reads in ``main`` (csv_path,
    dpr_ctx_encoder_model_name, output_dir) and the ``HfArgumentParser`` call.
    """

    # Path to the source documents (tab-separated, columns 'title' and 'text').
    csv_path: str = field(
        default=str(Path(__file__).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv'),
        metadata={'help': "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={'help': "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default='facebook/rag-sequence-nq',
        metadata={'help': "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default='facebook/dpr-ctx_encoder-multiset-base',
        metadata={
            'help': (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / 'test_run' / 'dummy-kb'),
        metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'},
    )
@dataclass
class ProcessingArguments:
    """Arguments controlling passage splitting and embedding computation."""

    # None means single-process dataset.map.
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            'help': 'The number of processes to use to split the documents into passages. Default is single process.'
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
        },
    )
@dataclass
class IndexHnswArguments:
    """Arguments for the Faiss HNSW index built over the passage embeddings."""

    # Embedding dimension (DPR pooler output size).
    d: int = field(
        default=768,
        metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'},
    )
    m: int = field(
        default=128,
        metadata={
            'help': (
                'The number of bi-directional links created for every new element during the HNSW index construction.'
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        # Fall back to a temporary directory when no output dir was given.
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 517 | 0 |
import numpy as np
def _a ( SCREAMING_SNAKE_CASE_ : np.ndarray ):
return 1 / (1 + np.exp(-vector ))
def _a ( SCREAMING_SNAKE_CASE_ : np.ndarray ):
return vector * sigmoid(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
# Lazy-import machinery: map submodule -> public names, then replace this
# module with a _LazyModule proxy so the tokenizer is only imported on access.
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # NOTE(review): sys.modules assignment restored from the standard
    # transformers lazy-module pattern; the obfuscated line bound the proxy to
    # a throwaway name, leaving the module eager and `_import_structure` undefined.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 552 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
# Published DialoGPT checkpoint sizes.
DIALOGPT_MODELS = ['small', 'medium', 'large']
# Fine-tuned checkpoints store the tied LM head under OLD_KEY; transformers
# expects it under NEW_KEY.
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert a DialoGPT ``*_ft.pkl`` checkpoint into a transformers state dict.

    Renames the tied LM-head weight key and saves the result as WEIGHTS_NAME
    inside ``pytorch_dump_folder_path`` (created if missing).
    """
    d = torch.load(checkpoint_path)
    # Keys inlined ("lm_head.decoder.weight" -> "lm_head.weight") so this edit
    # does not depend on the (mangled) module-level constants.
    d["lm_head.weight"] = d.pop("lm_head.decoder.weight", None)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    # Model list inlined so this block does not rely on the (mangled) module constant.
    for MODEL in ['small', 'medium', 'large']:
        checkpoint_path = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
        pytorch_dump_folder_path = f"""./DialoGPT-{MODEL}"""
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 211 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules executed during one forward pass of ``module``."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        # A module counts as a leaf if it has no submodules; conv and batchnorm
        # layers are kept explicitly.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Always detach the hooks so the model is left unchanged.
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies weights from ``src`` to ``dest`` by pairing their traced parametrized layers.

    Assumes both models execute their learnable operations in the same order.
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x):
        # Trace both models with the same input, drop skipped layer types, then
        # copy state dicts pairwise.
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.' )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}' )
class FakeRegNetVisslWrapper(nn.Module):
    """Adapts a classy-vision RegNet trunk to VISSL's feature-extraction interface."""

    def __init__(self, model):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f'Unexpected layer name {k}'
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f'res{block_index}', v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x):
        # out_feat_keys=None mirrors upstream (the obfuscated code passed the
        # input tensor here by mistake).
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """Maps our model names to factories returning ``(source_model, state_dict)``.

    Unknown names fall back to a timm model factory.
    """

    def convert_name_to_timm(self, x):
        # "regnet-y-040" -> "regnety_040"
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x):
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """Maps model names to the transformers class used to instantiate them."""

    def __getitem__(self, x):
        # SEER checkpoints without an in1k fine-tuned head are plain backbones.
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys):
    """Copy selected tensors between state dicts, cloning each.

    ``keys`` is a list of ``(from_key, to_key)`` pairs; returns the mutated
    ``to_state_dict``.
    """
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f'Copied key={from_key} to={to_key}' )
    return to_state_dict
def convert_weight_and_push(name, from_model_func, our_model_func, config, save_directory, push_to_hub=True):
    """Instantiate source + HF models, transfer weights, check logits, optionally push.

    NOTE(review): ``raise_if_mismatch=False`` restored from the upstream
    conversion script; the obfuscated code passed a mangled placeholder here.
    """
    print(f'Converting {name}...' )
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 2_2_4, 2_2_4))
        module_transfer(x)
        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)
        our_outputs = our_model(x, output_hidden_states=True)
        our_output = (
            our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
        )
        from_output = from_model(x)
        from_output = from_output[-1] if type(from_output) is list else from_output
        # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
        if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]
        assert torch.allclose(from_output, our_output), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True, )
        size = 2_2_4 if "seer" not in name else 3_8_4
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True, )
        print(f'Pushed {name}' )
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """Convert every (or one named) RegNet checkpoint to transformers format.

    Returns ``(config, expected_shape)``. Variable names restored from the
    upstream conversion script; the obfuscated original assigned everything to
    one mangled name and then read the original identifiers, and its 10B
    RegNetParams call repeated a keyword (SyntaxError) — fixed to ``w_0``.
    NOTE(review): the classy-vision class names RegNetYaagf/RegNetYaaagf come
    from this file's (mangled) import line and are kept as-is so this edit
    stands alone.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_0_0_0
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # All ImageNet configs share the label maps; bake them in with partial.
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"),
        "regnet-x-004": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"),
        "regnet-x-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"),
        "regnet-x-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"),
        "regnet-x-016": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"),
        "regnet-x-032": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"),
        "regnet-x-040": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"),
        "regnet-x-064": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"),
        "regnet-x-080": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"),
        "regnet-x-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"),
        "regnet-x-160": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"),
        "regnet-x-320": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8),
        "regnet-y-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16),
        "regnet-y-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16),
        "regnet-y-016": ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24),
        "regnet-y-032": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24),
        "regnet-y-040": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64),
        "regnet-y-064": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72),
        "regnet-y-080": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56),
        "regnet-y-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112),
        "regnet-y-160": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112),
        "regnet-y-320": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer": RegNetConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264),
        "regnet-y-2560-seer": RegNetConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010),
    }
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url, model_func):
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))),
    )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help=(
            'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
            ' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=Path,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        default=True,
        type=bool,
        required=False,
        help='If True, push model and image processor to the hub.',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 211 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_a = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    """Deprecated alias of ``GLPNImageProcessor`` kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Warn with FutureWarning (the obfuscated code passed a positional
        # argument as the warning category by mistake).
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 716 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['image_processor', 'tokenizer']
lowercase__ = 'OwlViTImageProcessor'
lowercase__ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __a=None , __a=None , **__a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __a , )
_UpperCamelCase = kwargs.pop('''feature_extractor''')
_UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(__a , __a)
# NOTE(review): every parameter below is named `__a` — duplicate parameter
# names are a SyntaxError in Python. The body reads `text`, `query_images`,
# `images` and `return_tensors`, so those were presumably the original
# names (plus a `padding="max_length"` default). TODO restore.
def __call__( self , __a=None , __a=None , __a=None , __a="max_length" , __a="np" , **__a) -> List[str]:
    """Prepare text queries, query images and/or target images as model inputs."""
    # At least one of the three modalities must be provided.
    if text is None and query_images is None and images is None:
        raise ValueError(
            '''You have to specify at least one text or query image or image. All three cannot be none.''')
    if text is not None:
        # A single string, or a flat list of strings -> one tokenizer call.
        if isinstance(__a , __a) or (isinstance(__a , __a) and not isinstance(text[0] , __a)):
            _UpperCamelCase = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a)]
        elif isinstance(__a , __a) and isinstance(text[0] , __a):
            # Nested list of query strings: pad each sample to the widest one.
            _UpperCamelCase = []
            # Maximum number of queries across batch
            _UpperCamelCase = max([len(__a) for t in text])
            # Pad all batch samples to max number of text queries
            for t in text:
                if len(__a) != max_num_queries:
                    _UpperCamelCase = t + [''' '''] * (max_num_queries - len(__a))

                _UpperCamelCase = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a)
                encodings.append(__a)
        else:
            raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''')
        # Concatenate the per-sample encodings with the requested tensor backend.
        if return_tensors == "np":
            _UpperCamelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0)
            _UpperCamelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0)
        elif return_tensors == "jax" and is_flax_available():
            import jax.numpy as jnp
            _UpperCamelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0)
            _UpperCamelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0)
        elif return_tensors == "pt" and is_torch_available():
            import torch
            _UpperCamelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0)
            _UpperCamelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0)
        elif return_tensors == "tf" and is_tf_available():
            import tensorflow as tf
            _UpperCamelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0)
            _UpperCamelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0)
        else:
            raise ValueError('''Target return tensor type could not be returned''')
        # NOTE(review): the successive rebindings of `_UpperCamelCase` below
        # discard the freshly created `BatchEncoding()` — these look like
        # destroyed item assignments (`encoding["input_ids"] = input_ids`,
        # etc.). TODO confirm against the upstream processor implementation.
        _UpperCamelCase = BatchEncoding()
        _UpperCamelCase = input_ids
        _UpperCamelCase = attention_mask
    if query_images is not None:
        _UpperCamelCase = BatchEncoding()
        # Preprocess the query images and keep only their pixel values.
        _UpperCamelCase = self.image_processor(
            __a , return_tensors=__a , **__a).pixel_values
        _UpperCamelCase = query_pixel_values
    if images is not None:
        _UpperCamelCase = self.image_processor(__a , return_tensors=__a , **__a)
    # Merge the target-image pixel values into the text/query encoding
    # when both were given; otherwise return whichever side exists.
    if text is not None and images is not None:
        _UpperCamelCase = image_features.pixel_values
        return encoding
    elif query_images is not None and images is not None:
        _UpperCamelCase = image_features.pixel_values
        return encoding
    elif text is not None or query_images is not None:
        return encoding
    else:
        return BatchEncoding(data=dict(**__a) , tensor_type=__a)
def post_process(self, *args, **kwargs):
    """Forward everything to the image processor's `post_process`.

    Please refer to the docstring of that method for more information.
    """
    return self.image_processor.post_process(*args, **kwargs)
def post_process_object_detection(self, *args, **kwargs):
    """Forward everything to the image processor's `post_process_object_detection`.

    Please refer to the docstring of that method for more information.
    """
    return self.image_processor.post_process_object_detection(*args, **kwargs)
def post_process_image_guided_detection(self, *args, **kwargs):
    """Forward everything to the image processor's `post_process_image_guided_detection`.

    Please refer to the docstring of that method for more information.
    """
    return self.image_processor.post_process_image_guided_detection(*args, **kwargs)
def batch_decode(self, *args, **kwargs):
    """Forward everything to the tokenizer's `batch_decode`.

    Please refer to the docstring of that method for more information.
    """
    return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
    """Forward everything to the tokenizer's `decode`.

    Please refer to the docstring of that method for more information.
    """
    return self.tokenizer.decode(*args, **kwargs)
@property
def feature_extractor_class(self):
    """Deprecated alias for `image_processor_class` (removal planned for v5)."""
    warnings.warn(
        '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
        FutureWarning,
    )
    return self.image_processor_class
@property
def feature_extractor(self):
    """Deprecated alias for `image_processor` (removal planned for v5)."""
    warnings.warn(
        '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
        FutureWarning,
    )
    return self.image_processor
| 78 | 0 |
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the decimal digits of ``num!`` (Project Euler 20).

    The original definition was misnamed, so the ``__main__`` guard's call
    to ``solution`` raised NameError; the name now matches its call site.

    >>> solution(10)
    27
    """
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("""Enter the Number: """).strip())))
| 654 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module-level names below are `a__`, so the archive map
# immediately overwrites the logger — two distinct names (a logger and a
# *_PRETRAINED_CONFIG_ARCHIVE_MAP) were presumably lost in obfuscation.
a__ = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> config URL.
a__ = {
    """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( lowercase_ ):
    """Configuration class for a WavLM model.

    NOTE(review): the base class name `lowercase_` is not defined in this
    module (presumably `PretrainedConfig` before obfuscation).
    NOTE(review): every `__init__` parameter is named `UpperCamelCase__` —
    duplicate parameter names are a SyntaxError — and the body rebinds a
    single local `snake_case__` where `self.<attr>` assignments
    (`self.hidden_size = hidden_size`, ...) clearly used to be: later code
    reads `self.conv_dim`, `self.conv_stride` etc., which are never set.
    TODO restore against the upstream WavLM configuration.
    """

    # Model-type identifier (presumably the `model_type` class attribute).
    _lowercase : Dict = '''wavlm'''

    def __init__( self : Tuple , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : Any=7_6_8 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : Tuple=1_2 , UpperCamelCase__ : str=3_0_7_2 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Optional[int]=1E-5 , UpperCamelCase__ : Any="group" , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCamelCase__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Dict=(1_0, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[Any]=1_2_8 , UpperCamelCase__ : Optional[int]=1_6 , UpperCamelCase__ : Optional[Any]=3_2_0 , UpperCamelCase__ : Any=8_0_0 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=0.05 , UpperCamelCase__ : Optional[Any]=1_0 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Tuple=1_0 , UpperCamelCase__ : Optional[int]=3_2_0 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1_0_0 , UpperCamelCase__ : Dict=2_5_6 , UpperCamelCase__ : Optional[int]=2_5_6 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Tuple="mean" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Union[str, Any]=2_5_6 , UpperCamelCase__ : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCamelCase__ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCamelCase__ : Any=(1, 2, 3, 1, 1) , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : str=8_0 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : str=False , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : List[str] , ):
        """Build the config; defaults correspond to the wavlm-base checkpoint."""
        super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__)
        # --- transformer encoder dimensions ---
        snake_case__ = hidden_size
        snake_case__ = feat_extract_norm
        snake_case__ = feat_extract_activation
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = conv_bias
        snake_case__ = num_buckets
        snake_case__ = max_bucket_distance
        snake_case__ = num_conv_pos_embeddings
        snake_case__ = num_conv_pos_embedding_groups
        snake_case__ = len(self.conv_dim)
        snake_case__ = num_hidden_layers
        snake_case__ = intermediate_size
        snake_case__ = hidden_act
        snake_case__ = num_attention_heads
        # --- dropout / regularisation ---
        snake_case__ = hidden_dropout
        snake_case__ = attention_dropout
        snake_case__ = activation_dropout
        snake_case__ = feat_proj_dropout
        snake_case__ = final_dropout
        snake_case__ = layerdrop
        snake_case__ = layer_norm_eps
        snake_case__ = initializer_range
        snake_case__ = num_ctc_classes
        snake_case__ = vocab_size
        snake_case__ = do_stable_layer_norm
        snake_case__ = use_weighted_layer_sum
        snake_case__ = classifier_proj_size

        # The three convolutional feature-extractor specs must be equally long.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        snake_case__ = apply_spec_augment
        snake_case__ = mask_time_prob
        snake_case__ = mask_time_length
        snake_case__ = mask_time_min_masks
        snake_case__ = mask_feature_prob
        snake_case__ = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        snake_case__ = num_codevectors_per_group
        snake_case__ = num_codevector_groups
        snake_case__ = contrastive_logits_temperature
        snake_case__ = num_negatives
        snake_case__ = codevector_dim
        snake_case__ = proj_codevector_dim
        snake_case__ = diversity_loss_weight

        # ctc loss
        snake_case__ = ctc_loss_reduction
        snake_case__ = ctc_zero_infinity

        # adapter
        snake_case__ = add_adapter
        snake_case__ = adapter_kernel_size
        snake_case__ = adapter_stride
        snake_case__ = num_adapter_layers
        snake_case__ = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        snake_case__ = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = xvector_output_dim

    @property
    def __magic_name__ ( self : Optional[int]):
        """Overall downsampling factor of the conv feature extractor
        (product of the conv strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1)
| 654 | 1 |
def lowerCamelCase__(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return ``min_val`` when *option* is true, otherwise ``max_val``.

    The original signature reused one parameter name three times (a
    SyntaxError) and validated inputs with ``assert`` (stripped under -O);
    both are fixed here.

    Raises:
        TypeError: if any argument has the wrong type.
        ValueError: if ``min_val`` is greater than ``max_val``.
    """
    if not (isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)):
        raise TypeError("Invalid type of value(s) specified to function!")
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def get_avg(number_a: int, number_b: int) -> int:
    """Return the midpoint of the two numbers, truncated toward zero.

    Renamed from the obfuscated definition so the existing call site in the
    guessing loop (`get_avg(...)`) resolves; also fixes the duplicated
    parameter name (a SyntaxError).
    """
    return int((number_a + number_b) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for *to_guess* strictly between *lower* and *higher*,
    printing every intermediate guess and the final answer.

    Renamed from the obfuscated definition so the existing call in ``main``
    resolves; duplicated parameter names (a SyntaxError) and the destroyed
    local names inside the loop are restored.

    Raises:
        TypeError: if any argument is not an ``int``.
        ValueError: if ``lower > higher`` or *to_guess* is out of range.
    """
    if not (isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)):
        raise TypeError('argument values must be type of "int"')
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        """Tell whether *number* over/under-shoots the target."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            # Guess was below the target: raise the lower bound.
            last_lowest = number
        elif answer(number) == "high":
            # Guess was above the target: lower the upper bound.
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main() -> None:
    """Read the bounds and the target from stdin and run the guessing game.

    Renamed from the obfuscated definition so the ``__main__`` guard's call
    to ``main()`` resolves.
    """
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 711 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _a ( unittest.TestCase ):
    """Tests for `BarkProcessor` (tokenizer wrapping + voice presets).

    NOTE(review): every method below is named `SCREAMING_SNAKE_CASE`, so
    earlier definitions are shadowed and unittest cannot discover them —
    the originals were presumably `setUp`, `get_tokenizer`, `tearDown` and
    several `test_*` methods. TODO restore.
    NOTE(review): in the first method all assignments rebind a local
    `_UpperCAmelCase` instead of setting the `self.checkpoint`,
    `self.tmpdirname`, `self.voice_preset`, `self.input_string`, ...
    attributes that the other methods read — destroyed attribute
    assignments; verify against the upstream test file.
    """

    def SCREAMING_SNAKE_CASE ( self ):
        # Presumably setUp: fixture values used throughout the tests.
        _UpperCAmelCase ="ylacombe/bark-small"
        _UpperCAmelCase =tempfile.mkdtemp()
        _UpperCAmelCase ="en_speaker_1"
        _UpperCAmelCase ="This is a test string"
        _UpperCAmelCase ="speaker_embeddings_path.json"
        _UpperCAmelCase ="speaker_embeddings"

    def SCREAMING_SNAKE_CASE ( self , **_snake_case ):
        # Presumably get_tokenizer: tokenizer for the fixture checkpoint.
        return AutoTokenizer.from_pretrained(self.checkpoint , **_snake_case )

    def SCREAMING_SNAKE_CASE ( self ):
        # Presumably tearDown: remove the temporary directory.
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE ( self ):
        # Round-trip: save_pretrained then from_pretrained keeps the vocab.
        _UpperCAmelCase =self.get_tokenizer()
        _UpperCAmelCase =BarkProcessor(tokenizer=_snake_case )
        processor.save_pretrained(self.tmpdirname )
        _UpperCAmelCase =BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        # Round-trip with speaker embeddings and extra tokenizer kwargs.
        _UpperCAmelCase =BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )

        _UpperCAmelCase =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        _UpperCAmelCase =BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def SCREAMING_SNAKE_CASE ( self ):
        # Voice presets: dict input, npz file input, and hub lookup.
        _UpperCAmelCase =BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        _UpperCAmelCase =35
        _UpperCAmelCase =2
        _UpperCAmelCase =8
        _UpperCAmelCase ={
            "semantic_prompt": np.ones(_snake_case ),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        _UpperCAmelCase =processor(text=self.input_string , voice_preset=_snake_case )
        _UpperCAmelCase =inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_snake_case , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        _UpperCAmelCase =os.path.join(self.tmpdirname , "file.npz" )
        np.savez(_snake_case , **_snake_case )
        _UpperCAmelCase =processor(text=self.input_string , voice_preset=_snake_case )
        _UpperCAmelCase =inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_snake_case , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        _UpperCAmelCase =processor(text=self.input_string , voice_preset=self.voice_preset )

    def SCREAMING_SNAKE_CASE ( self ):
        # Processor output must match a direct padded tokenizer call.
        _UpperCAmelCase =self.get_tokenizer()
        _UpperCAmelCase =BarkProcessor(tokenizer=_snake_case )
        _UpperCAmelCase =processor(text=self.input_string )
        _UpperCAmelCase =tokenizer(
            self.input_string , padding="max_length" , max_length=256 , add_special_tokens=_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 592 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__(matrix: list[list[int]]) -> int:
    """Return the minimum path cost from the top-left to the bottom-right of
    *matrix*, moving only right or down.

    Improvements over the previous version: the caller's matrix is no longer
    mutated (the DP runs on a copy), and an empty matrix returns 0 instead
    of raising IndexError.

    >>> lowerCamelCase__([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    >>> lowerCamelCase__([])
    0
    """
    if not matrix or not matrix[0]:
        return 0
    cost = [row[:] for row in matrix]
    # preprocessing the first row
    for col in range(1, len(cost[0])):
        cost[0][col] += cost[0][col - 1]
    # preprocessing the first column
    for row in range(1, len(cost)):
        cost[row][0] += cost[row - 1][0]
    # updating the path cost for current position
    for row in range(1, len(cost)):
        for col in range(1, len(cost[0])):
            cost[row][col] += min(cost[row - 1][col], cost[row][col - 1])
    return cost[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a__(PipelineTool):
    """Tool that generates an English caption for an image.

    The previous version inherited from the undefined name
    `lowerCamelCase__` (the `PipelineTool` import above is clearly the
    intended base), assigned all six class attributes to the same name
    `lowercase__`, and gave all three methods the same name `lowercase_`;
    the standard PipelineTool attribute/method names are restored here.
    """

    default_checkpoint = """Salesforce/blip-image-captioning-base"""
    description = (
        """This is a tool that generates a description of an image. It takes an input named `image` which should be the """
        """image to caption, and returns a text that contains the description in English."""
    )
    name = """image_captioner"""
    model_class = AutoModelForVisionaSeq

    inputs = ["""image"""]
    outputs = ["""text"""]

    def __init__(self, *args, **kwargs):
        # Captioning needs PIL; fail early if the vision extra is missing.
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the PIL image into model-ready pixel values."""
        return self.pre_processor(images=image, return_tensors='pt')

    def forward(self, inputs):
        """Generate caption token ids from the preprocessed inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode generated ids into a clean caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
"""Download the Open Graph preview image (og:image) of a web page."""
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # was `bsa` — BeautifulSoup lives in the bs4 package

if __name__ == "__main__":
    url = input('''Enter image url: ''').strip()
    print(f'''Downloading image from {url} ...''')
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
    with open(file_name, '''wb''') as fp:
        fp.write(image_data)
    print(f'''Done. Image saved to disk as {file_name}.''')
| 705 | '''simple docstring'''
def counting_sort(collection):
    """Stable counting sort; returns a new sorted list of the input integers.

    Fixes over the previous version: the def is named `counting_sort` to
    match its existing call sites, the prefix-sum loop iterated over
    `range(1, collection)` (a TypeError) instead of the counting-array
    length, and the placement step rebound a throwaway local instead of
    writing into `ordered`.
    """
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered
return ordered
def counting_sort_string(string):
    """Counting-sort the characters of *string* and return the sorted string.

    Renamed from the obfuscated definition so the existing call in the
    ``__main__`` guard resolves.
    """
    return "".join([chr(code) for code in counting_sort([ord(char) for char in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"

    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(counting_sort(unsorted))
| 389 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
    """Multi-ControlNet wrapper: runs several ControlNets and sums their
    residuals before handing them to the UNet.

    NOTE(review): the three instance methods below all share the name `A__`,
    so only the last survives on the class — presumably `forward`,
    `save_pretrained` and a `from_pretrained` classmethod before
    obfuscation. TODO restore.
    """

    def __init__(self , UpperCAmelCase):
        """Wrap the given ControlNets in an nn.ModuleList.

        NOTE(review): the ModuleList is bound to a local instead of
        `self.nets`, which the forward-style method below reads — looks
        like a destroyed attribute assignment; verify upstream.
        """
        super().__init__()
        __UpperCAmelCase =nn.ModuleList(UpperCAmelCase)

    def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = True , ):
        """Run every ControlNet on its (image, scale) pair and accumulate
        the down-block and mid-block residual samples."""
        for i, (image, scale, controlnet) in enumerate(zip(UpperCAmelCase , UpperCAmelCase , self.nets)):
            __UpperCAmelCase , __UpperCAmelCase =controlnet(
                UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , )

            # merge samples
            if i == 0:
                __UpperCAmelCase , __UpperCAmelCase =down_samples, mid_sample
            else:
                # Element-wise sum of this net's residuals into the running totals.
                __UpperCAmelCase =[
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(UpperCAmelCase , UpperCAmelCase)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def A__ (self , UpperCAmelCase , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , ):
        """Save each wrapped ControlNet under save_directory, save_directory_1, ..."""
        __UpperCAmelCase =0
        __UpperCAmelCase =save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                UpperCAmelCase , is_main_process=UpperCAmelCase , save_function=UpperCAmelCase , safe_serialization=UpperCAmelCase , variant=UpperCAmelCase , )

            idx += 1
            __UpperCAmelCase =model_path_to_save + f"""_{idx}"""

    @classmethod
    def A__ (cls , UpperCAmelCase , **UpperCAmelCase):
        """Load ControlNets from path, path_1, path_2, ... until no more
        directories exist, then wrap them in this class."""
        __UpperCAmelCase =0
        __UpperCAmelCase =[]

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        __UpperCAmelCase =pretrained_model_path
        while os.path.isdir(UpperCAmelCase):
            __UpperCAmelCase =ControlNetModel.from_pretrained(UpperCAmelCase , **UpperCAmelCase)
            controlnets.append(UpperCAmelCase)

            idx += 1
            __UpperCAmelCase =pretrained_model_path + f"""_{idx}"""

        logger.info(f"""{len(UpperCAmelCase)} controlnets loaded from {pretrained_model_path}.""")

        if len(UpperCAmelCase) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(UpperCAmelCase)}. Expected at least {pretrained_model_path + '_0'}.""")

        return cls(UpperCAmelCase)
| 132 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class _SCREAMING_SNAKE_CASE :
    """Builds a tiny DistilBert config plus dummy inputs and checks the
    output shapes of each TF-DistilBert head.

    NOTE(review): every method below is named `A__`, so earlier definitions
    are shadowed — presumably `prepare_config_and_inputs`,
    `create_and_check_distilbert_*` and `prepare_config_and_inputs_for_common`
    before obfuscation. TODO restore.
    NOTE(review): in `__init__` all assignments rebind a local
    `__UpperCAmelCase` instead of setting the `self.batch_size`,
    `self.vocab_size`, ... attributes the other methods read — destroyed
    attribute assignments; verify against the upstream test file.
    """

    def __init__(self , UpperCAmelCase , ):
        # Tiny-model hyper-parameters: batch 13, seq len 7, hidden 32,
        # 2 layers, 4 heads, vocab 99, etc.
        __UpperCAmelCase =parent
        __UpperCAmelCase =1_3
        __UpperCAmelCase =7
        __UpperCAmelCase =True
        __UpperCAmelCase =True
        __UpperCAmelCase =False
        __UpperCAmelCase =True
        __UpperCAmelCase =9_9
        __UpperCAmelCase =3_2
        __UpperCAmelCase =2
        __UpperCAmelCase =4
        __UpperCAmelCase =3_7
        __UpperCAmelCase ='''gelu'''
        __UpperCAmelCase =0.1
        __UpperCAmelCase =0.1
        __UpperCAmelCase =5_1_2
        __UpperCAmelCase =1_6
        __UpperCAmelCase =2
        __UpperCAmelCase =0.02
        __UpperCAmelCase =3
        __UpperCAmelCase =4
        __UpperCAmelCase =None

    def A__ (self):
        """Create random ids/masks/labels and a small DistilBertConfig."""
        __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)

        __UpperCAmelCase =None
        if self.use_input_mask:
            __UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length])

        __UpperCAmelCase =None
        __UpperCAmelCase =None
        __UpperCAmelCase =None
        if self.use_labels:
            __UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size)
            __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            __UpperCAmelCase =ids_tensor([self.batch_size] , self.num_choices)

        __UpperCAmelCase =DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
        """Base model: last_hidden_state must be (batch, seq, hidden)."""
        __UpperCAmelCase =TFDistilBertModel(config=UpperCAmelCase)
        __UpperCAmelCase ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        __UpperCAmelCase =model(UpperCAmelCase)

        __UpperCAmelCase =[input_ids, input_mask]

        __UpperCAmelCase =model(UpperCAmelCase)

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
        """Masked-LM head: logits must be (batch, seq, vocab)."""
        __UpperCAmelCase =TFDistilBertForMaskedLM(config=UpperCAmelCase)
        __UpperCAmelCase ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        __UpperCAmelCase =model(UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
        """QA head: start/end logits must be (batch, seq)."""
        __UpperCAmelCase =TFDistilBertForQuestionAnswering(config=UpperCAmelCase)
        __UpperCAmelCase ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        __UpperCAmelCase =model(UpperCAmelCase)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
        """Sequence-classification head: logits must be (batch, num_labels)."""
        __UpperCAmelCase =self.num_labels
        __UpperCAmelCase =TFDistilBertForSequenceClassification(UpperCAmelCase)
        __UpperCAmelCase ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        __UpperCAmelCase =model(UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
        """Multiple-choice head: inputs are tiled per choice; logits must be
        (batch, num_choices)."""
        __UpperCAmelCase =self.num_choices
        __UpperCAmelCase =TFDistilBertForMultipleChoice(UpperCAmelCase)
        __UpperCAmelCase =tf.tile(tf.expand_dims(UpperCAmelCase , 1) , (1, self.num_choices, 1))
        __UpperCAmelCase =tf.tile(tf.expand_dims(UpperCAmelCase , 1) , (1, self.num_choices, 1))
        __UpperCAmelCase ={
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        __UpperCAmelCase =model(UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))

    def A__ (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase):
        """Token-classification head: logits must be (batch, seq, num_labels)."""
        __UpperCAmelCase =self.num_labels
        __UpperCAmelCase =TFDistilBertForTokenClassification(UpperCAmelCase)
        __UpperCAmelCase ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        __UpperCAmelCase =model(UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def A__ (self):
        """Repackage config + inputs into the common (config, inputs_dict) form."""
        __UpperCAmelCase =self.prepare_config_and_inputs()
        ((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) =config_and_inputs
        __UpperCAmelCase ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common-suite tests for the TF-DistilBert model family.

    NOTE(review): the duplicated base `_lowerCAmelCase` presumably stood for
    `TFModelTesterMixin` and `PipelineTesterMixin` before obfuscation, and
    every test method below is named `A__` (shadowing) — originals were
    presumably `setUp`, `test_config`, `test_distilbert_model`, etc.
    TODO restore.
    """

    # Model classes exercised by the common test suite.
    a_ : Any = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    # Pipeline-task -> model-class mapping for the pipeline tests.
    a_ : List[Any] = (
        {
            '''feature-extraction''': TFDistilBertModel,
            '''fill-mask''': TFDistilBertForMaskedLM,
            '''question-answering''': TFDistilBertForQuestionAnswering,
            '''text-classification''': TFDistilBertForSequenceClassification,
            '''token-classification''': TFDistilBertForTokenClassification,
            '''zero-shot''': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    a_ : Any = False
    a_ : List[Any] = False

    def A__ (self):
        # Presumably setUp: model tester + config tester fixtures.
        __UpperCAmelCase =TFDistilBertModelTester(self)
        __UpperCAmelCase =ConfigTester(self , config_class=UpperCAmelCase , dim=3_7)

    def A__ (self):
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    def A__ (self):
        __UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase)

    def A__ (self):
        __UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase)

    def A__ (self):
        __UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase)

    def A__ (self):
        __UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase)

    def A__ (self):
        __UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase)

    def A__ (self):
        __UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase)

    @slow
    def A__ (self):
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            __UpperCAmelCase =TFDistilBertModel.from_pretrained(UpperCAmelCase)
            self.assertIsNotNone(UpperCAmelCase)
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration test: pretrained distilbert-base-uncased must reproduce a
    known slice of hidden states."""

    @slow
    def A__ (self):
        # Presumably test_inference_masked_lm before obfuscation.
        __UpperCAmelCase =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''')
        __UpperCAmelCase =tf.constant([[0, 1, 2, 3, 4, 5]])
        __UpperCAmelCase =model(UpperCAmelCase)[0]

        # Expected output shape: (batch=1, seq=6, hidden=768).
        __UpperCAmelCase =[1, 6, 7_6_8]
        self.assertEqual(output.shape , UpperCAmelCase)

        # Reference values for the top-left 3x3 slice of the hidden states.
        __UpperCAmelCase =tf.constant(
            [
                [
                    [0.1926_1885, -0.1373_2955, 0.411_9799],
                    [0.2215_0156, -0.0742_2661, 0.3903_7204],
                    [0.2275_6018, -0.089_6414, 0.370_1467],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4)
| 132 | 1 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
# The CLI trainer is useless without a deep-learning backend; fail at import.
if not is_tf_available() and not is_torch_available():
    raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')

# TF training parameters
# NOTE(review): both flags are bound to the same name `_snake_case`, so the
# second assignment overwrites the first — presumably two distinct flags
# (e.g. USE_XLA / USE_AMP) before obfuscation. TODO restore.
_snake_case : Dict = False
_snake_case : Tuple = False
def _A ( __snake_case :Namespace ) -> Dict:
    """Factory handed to argparse's `set_defaults(func=...)`: build the
    train command from parsed CLI arguments.

    NOTE(review): `TrainCommand` is not defined in this module (the class
    below is named `__SCREAMING_SNAKE_CASE`), so calling this raises
    NameError — the class name was presumably lost in obfuscation.
    """
    return TrainCommand(__snake_case )
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    """CLI command that trains a pipeline on a csv text-classification dataset.

    NOTE(review): the obfuscated original referenced undefined locals
    (``parser``, ``train_parser``, ``args``), dropped every ``self.`` prefix,
    passed the sub-parser object as argparse ``type=``/``required=`` values,
    and gave all four methods the same name (so only the last survived while
    ``run`` called the now-missing ``run_tf``/``run_torch``).  The conventional
    transformers-cli ``train`` structure is restored below.
    """

    @staticmethod
    def register_subcommand(parser) -> None:
        """Attach the ``train`` sub-parser and its arguments to *parser*."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task." )
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels." )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts." )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids." )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)." )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset." )
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model." )
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on." )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model." )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training." )
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation." )
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate." )
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer." )
        # `_A` is the module-level factory defined just above this class.
        train_parser.set_defaults(func=_A )

    def __init__(self, args) -> None:
        """Resolve parsed CLI *args* into pipeline, dataset and hyper-parameter state."""
        self.logger = logging.get_logger("transformers-cli/training" )
        # Prefer TensorFlow when installed, otherwise fall back to PyTorch.
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f'''Loading {args.task} pipeline for {args.model}''' )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f'''Loading dataset from {args.train_data}''' )
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f'''Loading validation dataset from {args.validation_data}''' )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the framework-specific training loop."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        """PyTorch training is not implemented for this command."""
        raise NotImplementedError

    def run_tf(self):
        """Train via the TF pipeline's ``fit`` and save the result to ``self.output``."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
| 214 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
# NOTE(review): the base-class token is identical to the class being defined and is
# not bound at this point, so this statement raises NameError at import time.
# Presumably this was ``class NewModelConfig(BertConfig)`` before obfuscation — confirm
# against the upstream test file.
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    # Registered model type used by the AutoConfig registration tests below.
    SCREAMING_SNAKE_CASE__ ="""new-model"""
if is_tf_available():
    # TF counterpart model stub used by the auto-class registration tests.
    class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
        # NOTE(review): ``NewModelConfig`` is not defined anywhere in this module (the
        # config class above lost its name to obfuscation), so executing this class
        # body raises NameError when TF is available — confirm the intended name.
        SCREAMING_SNAKE_CASE__ =NewModelConfig
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration tests for the TF auto-model factory classes.

    NOTE(review): obfuscation has damaged this class: every test method shares
    the name ``__lowerCAmelCase`` (only the last definition survives, and none
    are collected by unittest, which looks for ``test_*`` names), and several
    bodies reference ``_a`` where no such local exists — originally constants
    such as ``SMALL_MODEL_IDENTIFIER``, ``DUMMY_UNKNOWN_IDENTIFIER`` or the
    ``NewModelConfig`` class, judging by the imports at the top of this chunk.
    Confirm against the upstream test file before relying on any behavior here.
    """

    @slow
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        # Config + base model round-trip from a public BERT checkpoint.
        __SCREAMING_SNAKE_CASE = "bert-base-cased"
        __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )  # NOTE(review): `_a` is undefined in this scope
        self.assertIsNotNone(_a )
        self.assertIsInstance(_a, _a )
        __SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(_a )
        self.assertIsNotNone(_a )
        self.assertIsInstance(_a, _a )

    @slow
    def __lowerCAmelCase ( self ) -> Any:
        # Same round-trip through the pre-training head auto-class.
        __SCREAMING_SNAKE_CASE = "bert-base-cased"
        __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
        self.assertIsNotNone(_a )
        self.assertIsInstance(_a, _a )
        __SCREAMING_SNAKE_CASE = TFAutoModelForPreTraining.from_pretrained(_a )
        self.assertIsNotNone(_a )
        self.assertIsInstance(_a, _a )

    @slow
    def __lowerCAmelCase ( self ) -> Dict:
        # Causal-LM auto-class, including the `output_loading_info` return form.
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )  # NOTE(review): presumably `model_name`
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )
            __SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(_a )
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(_a, output_loading_info=_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )

    @slow
    def __lowerCAmelCase ( self ) -> List[str]:
        # Legacy `WithLMHead` auto-class over a BERT checkpoint.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )
            __SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )

    @slow
    def __lowerCAmelCase ( self ) -> int:
        # Masked-LM auto-class, with and without loading info.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )
            __SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(_a )
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(_a, output_loading_info=_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )

    @slow
    def __lowerCAmelCase ( self ) -> Optional[int]:
        # Seq2seq-LM auto-class over a T5 checkpoint.
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )
            __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(_a )
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(_a, output_loading_info=_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )

    @slow
    def __lowerCAmelCase ( self ) -> Any:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )
            __SCREAMING_SNAKE_CASE = TFAutoModelForSequenceClassification.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )

    @slow
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )
            __SCREAMING_SNAKE_CASE = TFAutoModelForQuestionAnswering.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )

    @slow
    @require_tensorflow_probability
    def __lowerCAmelCase ( self ) -> Dict:
        # Table-QA auto-class (TAPAS) requires tensorflow-probability.
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )
            __SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(_a )
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(
                _a, output_loading_info=_a )
            self.assertIsNotNone(_a )
            self.assertIsInstance(_a, _a )

    def __lowerCAmelCase ( self ) -> int:
        # Parameter counting on a tiny model; `_a` was presumably SMALL_MODEL_IDENTIFIER.
        __SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(_a )
        self.assertIsInstance(_a, _a )
        self.assertEqual(model.num_parameters(), 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=_a ), 1_44_10 )

    def __lowerCAmelCase ( self ) -> Tuple:
        # Same check through an identifier that only resolves via its model type;
        # `_a` was presumably DUMMY_UNKNOWN_IDENTIFIER.
        __SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(_a )
        self.assertIsInstance(_a, _a )
        self.assertEqual(model.num_parameters(), 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=_a ), 1_44_10 )

    def __lowerCAmelCase ( self ) -> List[Any]:
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        __SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
        self.assertIsInstance(_a, _a )
        __SCREAMING_SNAKE_CASE = copy.deepcopy(model.config )
        __SCREAMING_SNAKE_CASE = ["FunnelBaseModel"]
        __SCREAMING_SNAKE_CASE = TFAutoModel.from_config(_a )
        self.assertIsInstance(_a, _a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(_a )
            __SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(_a )
            self.assertIsInstance(_a, _a )

    def __lowerCAmelCase ( self ) -> str:
        # Dynamic registration of a new config/model pair on every auto-class;
        # cleaned up in the `finally` so other tests see pristine mappings.
        try:
            AutoConfig.register("new-model", _a )  # NOTE(review): `_a` was presumably NewModelConfig
            __SCREAMING_SNAKE_CASE = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(_a ):
                        auto_class.register(_a, _a )
                    auto_class.register(_a, _a )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(_a ):
                        auto_class.register(_a, _a )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    __SCREAMING_SNAKE_CASE = BertModelTester(self ).get_config()
                    __SCREAMING_SNAKE_CASE = NewModelConfig(**tiny_config.to_dict() )
                    __SCREAMING_SNAKE_CASE = auto_class.from_config(_a )
                    self.assertIsInstance(_a, _a )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(_a )
                        __SCREAMING_SNAKE_CASE = auto_class.from_pretrained(_a )
                        self.assertIsInstance(_a, _a )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def __lowerCAmelCase ( self ) -> List[str]:
        # Helpful error for an identifier that is neither local nor on the Hub.
        with self.assertRaisesRegex(
            _a, "bert-base is not a local folder and is not a valid model identifier" ):
            __SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("bert-base" )

    def __lowerCAmelCase ( self ) -> Tuple:
        # Helpful error for a bad revision on a valid repo.
        with self.assertRaisesRegex(
            _a, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            __SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(_a, revision="aaaaaa" )

    def __lowerCAmelCase ( self ) -> Dict:
        # Helpful error when the repo exists but ships no model weights.
        with self.assertRaisesRegex(
            _a, "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin", ):
            __SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )

    def __lowerCAmelCase ( self ) -> Optional[int]:
        # Helpful error when only PyTorch weights exist and `from_pt` is not set.
        with self.assertRaisesRegex(_a, "Use `from_pt=True` to load this model" ):
            __SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )

    def __lowerCAmelCase ( self ) -> List[Any]:
        # Make sure we have cached the model.
        __SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
        with RequestCounter() as counter:
            __SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # A cached load should only issue the single HEAD request used for cache validation.
        self.assertEqual(counter.get_request_count, 0 )
        self.assertEqual(counter.head_request_count, 1 )
        self.assertEqual(counter.other_request_count, 0 )
        # With a sharded checkpoint
        __SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
        with RequestCounter() as counter:
            __SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
        self.assertEqual(counter.get_request_count, 0 )
        self.assertEqual(counter.head_request_count, 1 )
        self.assertEqual(counter.other_request_count, 0 )
| 214 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
def UpperCAmelCase__ (input_image, output_size, keep_aspect_ratio, multiple):
    """Compute the (height, width) a DPT input should be resized to.

    The target size is derived from ``output_size`` (an int means a square),
    optionally preserving the input aspect ratio, with both dimensions
    constrained to the nearest multiple of ``multiple``.

    NOTE(review): the obfuscated original gave all four parameters the same
    name (a SyntaxError) and referenced undefined locals (``x``,
    ``input_height``, ``scale_height``, ...); the canonical structure of this
    helper is restored here.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, then floor/ceil to honour the bounds.
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class A__ ( snake_case__ ):
    """DPT-style image processor: resize (optionally multiple-constrained),
    rescale and normalize images, plus semantic-segmentation post-processing.

    NOTE(review): the obfuscated original gave every positional parameter the
    same name (a SyntaxError) and defined four different methods all called
    ``a_`` while ``preprocess`` called ``self.resize``/``self.rescale``/
    ``self.normalize``; the conventional method and parameter names are
    restored here.
    """

    __magic_name__ = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Fall back to ImageNet statistics when no normalization stats are given.
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image,
        size,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ):
        """Resize ``image`` to ``size`` via the module-level size helper above."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        # `UpperCAmelCase__` is this module's get_resize_output_image_size helper.
        output_size = UpperCAmelCase__(
            image, (size["height"], size["width"]), keep_aspect_ratio, ensure_multiple_of
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        keep_aspect_ratio=None,
        ensure_multiple_of=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured resize/rescale/normalize steps and return a BatchFeature."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # BUG FIX: the original tested `do_resize and size is None or resample is None`,
        # which (by precedence) raised whenever `resample` was None even with
        # do_resize=False; parenthesized to match the error message's intent.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            # NOTE(review): keep_aspect_ratio/ensure_multiple_of are not forwarded here,
            # mirroring the original call — confirm whether they should be.
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Turn model ``outputs.logits`` into per-image segmentation maps,
        optionally resized to ``target_sizes``."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 550 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class A__ ( unittest.TestCase ):
    """Tests for TvltProcessor: save/load round-trip and audio/image dispatch.

    NOTE(review): obfuscation has damaged this class: all methods share the
    name ``a_`` (only the last definition survives and unittest collects
    nothing, since it looks for ``test_*``/``setUp`` names), and the first
    method's assignments lost their ``self.`` prefixes, so ``self.checkpoint``
    and ``self.tmpdirname`` used below are never set.  Confirm against the
    upstream test file before relying on any behavior here.
    """

    def a_ ( self ):
        # Originally setUp: should bind self.checkpoint / self.tmpdirname.
        snake_case = '''ZinengTang/tvlt-base'''
        snake_case = tempfile.mkdtemp()

    def a_ ( self , **__snake_case ):
        # Helper: image processor from the shared checkpoint.
        return TvltImageProcessor.from_pretrained(self.checkpoint , **__snake_case )

    def a_ ( self , **__snake_case ):
        # Helper: feature extractor from the shared checkpoint.
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__snake_case )

    def a_ ( self ):
        # Originally tearDown: drop the temporary save directory.
        shutil.rmtree(self.tmpdirname )

    def a_ ( self ):
        # save_pretrained/from_pretrained must preserve both sub-processors.
        snake_case = self.get_image_processor()
        snake_case = self.get_feature_extractor()
        snake_case = TvltProcessor(image_processor=__snake_case , feature_extractor=__snake_case )
        processor.save_pretrained(self.tmpdirname )
        snake_case = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , __snake_case )
        self.assertIsInstance(processor.image_processor , __snake_case )

    def a_ ( self ):
        # Audio-only input must match the bare feature extractor's output.
        snake_case = self.get_image_processor()
        snake_case = self.get_feature_extractor()
        snake_case = TvltProcessor(image_processor=__snake_case , feature_extractor=__snake_case )
        snake_case = np.ones([1_2_0_0_0] )
        snake_case = feature_extractor(__snake_case , return_tensors='''np''' )
        snake_case = processor(audio=__snake_case , return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def a_ ( self ):
        # Image-only input must match the bare image processor's output.
        snake_case = self.get_image_processor()
        snake_case = self.get_feature_extractor()
        snake_case = TvltProcessor(image_processor=__snake_case , feature_extractor=__snake_case )
        snake_case = np.ones([3, 2_2_4, 2_2_4] )
        snake_case = image_processor(__snake_case , return_tensors='''np''' )
        snake_case = processor(images=__snake_case , return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def a_ ( self ):
        # Combined audio+image call returns all four keys; no input raises.
        snake_case = self.get_image_processor()
        snake_case = self.get_feature_extractor()
        snake_case = TvltProcessor(image_processor=__snake_case , feature_extractor=__snake_case )
        snake_case = np.ones([1_2_0_0_0] )
        snake_case = np.ones([3, 2_2_4, 2_2_4] )
        snake_case = processor(audio=__snake_case , images=__snake_case )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(__snake_case ):
            processor()

    def a_ ( self ):
        # model_input_names must be the concatenation of both sub-processors'.
        snake_case = self.get_image_processor()
        snake_case = self.get_feature_extractor()
        snake_case = TvltProcessor(image_processor=__snake_case , feature_extractor=__snake_case )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 550 | 1 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def __UpperCAmelCase ( tmp_path ):
    """Fixture: write a small well-formed two-column csv, return its path as str.

    BUG FIX: the parameter was renamed away from ``tmp_path`` by obfuscation
    while the body still used ``tmp_path`` (NameError, and pytest injects the
    tmp directory by parameter name).
    """
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        "\\n header1,header2\n 1,2\n 10,20\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def __UpperCAmelCase ( tmp_path ):
    """Fixture: write a csv whose second data row has a trailing extra column,
    used to exercise the Csv builder's error reporting.

    BUG FIX: restored the ``tmp_path`` parameter name the body relies on.
    """
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        "\\n header1,header2\n 1,2\n 10,20,\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def __UpperCAmelCase ( tmp_path , image_file ):
    """Fixture: write a one-column csv whose single row is an image path.

    BUG FIX: the original declared two parameters with the same name (a
    SyntaxError) while the body used ``tmp_path`` and ``image_file``; the
    intended names are restored.
    """
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f'\\n image\n {image_file}\n ' )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def __UpperCAmelCase ( tmp_path ):
    """Fixture: write a one-column csv of string class labels (good/bad).

    BUG FIX: restored the ``tmp_path`` parameter name the body relies on.
    """
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        "\\n label\n good\n bad\n good\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def __UpperCAmelCase ( tmp_path ):
    """Fixture: write a one-column csv of space-separated integer lists.

    BUG FIX: restored the ``tmp_path`` parameter name the body relies on.
    """
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        "\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
def __UpperCAmelCase ( csv_file , malformed_csv_file , caplog ):
    """The Csv builder should raise on the malformed file and log an ERROR
    record that names it.

    BUG FIX: the original declared three parameters with the same name (a
    SyntaxError) and passed a file path as the expected exception type to
    ``pytest.raises``; the fixture names and ``ValueError`` are restored.
    """
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match="Error tokenizing data" ):
        for _ in generator:
            pass
    # The failing file must be identified in the logged error message.
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def __UpperCAmelCase ( csv_file_with_image ):
    """A csv column declared as ``Image()`` must be cast to the Image storage
    type, with each cell decoded to a ``{path, bytes}`` dict.

    BUG FIX: the original referenced undefined names (``csv_file_with_image``
    came in under an obfuscated parameter name; ``image_file`` and
    ``pa_table`` locals were lost); restored from the body's own usage.
    """
    # The single data row of the fixture csv is the image path.
    with open(csv_file_with_image , encoding="utf-8" ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("image" ).type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def __UpperCAmelCase ( csv_file_with_label ):
    """A csv column declared as ``ClassLabel`` must be cast to the label
    storage type with string labels converted to their integer ids.

    BUG FIX: the original called the nonexistent method ``.straint(...)`` on a
    file path; the intended call is ``ClassLabel.str2int(label)`` over the
    labels read from the file.
    """
    with open(csv_file_with_label , encoding="utf-8" ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def __UpperCAmelCase ( csv_file_with_int_list ):
    """A converter can turn a space-separated cell into a list of ints, and the
    resulting arrow column must have a list type.

    BUG FIX: the original converter lambda iterated an undefined ``x`` and
    converted the wrong variable; restored to ``lambda x: [int(i) for i in
    x.split()]``.
    """
    csv = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x: [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 166 |
from __future__ import annotations
import numpy as np
def __UpperCAmelCase ( snake_case_ : list[float] ):
    """Element-wise ReLU: max(0, x) for each entry, returned as a numpy array."""
    return np.maximum(0 , snake_case_ )


if __name__ == "__main__":
    # BUG FIX: the original guard called the undefined name ``relu``;
    # the function above is the only ReLU in this module.
    print(np.array(__UpperCAmelCase([-1, 0, 5])))  # --> [0 0 5]
| 166 | 1 |
def __A ( _A ):
    """Convert a binary string to its octal string representation.

    Raises:
        ValueError: if the string is empty or contains non-binary characters.
    """
    if not _A:
        raise ValueError("Empty string was passed to the function" )
    if any(ch not in "01" for ch in _A):
        raise ValueError("Non-binary value was passed to the function" )
    # Left-pad with zeros so the length is a multiple of three.
    padded = _A
    while len(padded) % 3:
        padded = "0" + padded
    # Each 3-bit group maps directly to one octal digit.
    digits = []
    for start in range(0, len(padded), 3):
        group = padded[start : start + 3]
        value = 4 * int(group[0]) + 2 * int(group[1]) + int(group[2])
        digits.append(str(value))
    return "".join(digits)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 197 | from typing import Any
def __A ( _A ):
    """Return the sorted list of mode(s) — the most frequent value(s) — of *_A*.

    An empty input yields an empty list.  Uses a single Counter pass instead
    of the original per-element ``list.count`` scan, which was O(n^2).
    """
    if not _A:
        return []
    from collections import Counter

    counts = Counter(_A)
    highest = max(counts.values())
    return sorted(value for value, count in counts.items() if count == highest)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 197 | 1 |
"""simple docstring"""
import os
from pathlib import Path
def A_ (model_card_dir, src_lang, tgt_lang, model_name):
    """Write an FSMT model-card README.md for one allenai wmt16 en-de checkpoint.

    BUG FIX: the obfuscated original declared four parameters with the same
    name (a SyntaxError) and referenced undefined locals (``model_card_dir``,
    ``src_lang``, ``texts``, ``scores``, ``pair``, ``path``); the names the
    f-string template itself references are restored.  The template text is
    unchanged.
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f'{src_lang}-{tgt_lang}'
    readme = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f'Generating {path}')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / '''allenai''' / model_name
    # BUG FIX: the original reassigned one obfuscated name three times (losing
    # ``repo_dir``/``model_cards_dir``) and then called the undefined name
    # ``write_model_card``; ``A_`` is the card generator defined in this file.
    A_(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 714 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): obfuscated names — both statements bind `UpperCamelCase_`, so the
# logger is immediately shadowed by the archive map.  Presumably these were
# `logger` and `MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm against callers.
UpperCamelCase_ : Tuple = logging.get_logger(__name__)
# Map from canonical checkpoint name to its hosted config file.
UpperCamelCase_ : Optional[int] = {
    '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
    '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class __lowerCAmelCase ( PretrainedConfig ):
    """Configuration class for MarkupLM models.

    Holds the standard BERT-style encoder hyper-parameters plus the
    XPath-embedding sizes that are specific to MarkupLM.

    NOTE(review): the obfuscated source declared every ``__init__`` parameter
    as ``_snake_case`` (a SyntaxError) and subclassed the undefined name
    ``_lowercase``.  Parameter names below are reconstructed from the
    attribute assignments in the body; the base class is the only config
    base imported by this module (``PretrainedConfig``).
    """

    # registry key used by the auto-config machinery (was obfuscated to `snake_case`)
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """See class docstring; special-token ids are forwarded to the base config."""
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties (MarkupLM-specific XPath embedding sizes)
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 482 | 0 |
"""simple docstring"""
from __future__ import annotations
A = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no already-placed queen attacks square (row, column).

    Checks the full row, the full column and the two upper diagonals; rows
    below `row` are still empty during backtracking, so lower diagonals are
    not inspected.  (Def was obfuscated to `_UpperCamelCase`; the solver
    already calls it as `is_safe`.)
    """
    # same row
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    # same column
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    """Backtracking N-queens solver; records every complete placement.

    When all rows are filled the board is appended to the global `solution`
    list and printed.  Exploration then continues, so *all* solutions are
    collected; the trailing `return False` just ends the branch.
    (Def was obfuscated to `_UpperCamelCase`; the driver calls `solve`.)
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for column in range(len(board)):
        if is_safe(board, row, column):
            board[row][column] = 1
            solve(board, row + 1)
            board[row][column] = 0  # undo the placement and keep searching
    return False
def printboard(board: list[list[int]]) -> None:
    """Print the board with `Q` for a queen and `.` for an empty square.

    (Def was obfuscated to `_UpperCamelCase`; the solver calls `printboard`.)
    """
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
# board size and empty board; names reconstructed from their usages below
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 77 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): obfuscated module constants — all but one were collapsed to `A`
# (each assignment shadowing the previous).  Only `ORG_NAME` is grounded by a
# visible use (the en-de checkpoint test below); the others are hedged guesses.
A = get_tests_dir("fixtures/test_sentencepiece.model")  # presumably the sample SP model path — confirm
A = {"target_lang": "fi", "source_lang": "en"}  # presumably mock_tokenizer_config — confirm
A = ">>zh<<"  # presumably the zh language code token — confirm
ORG_NAME = "Helsinki-NLP/"  # hub organisation prefix used to build checkpoint ids
if is_torch_available():
    A = "pt"
elif is_tf_available():
    A = "tf"
else:
    A = "jax"  # presumably FRAMEWORK (return_tensors value) — confirm
@require_sentencepiece
class lowercase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tokenizer test-suite for MarianTokenizer.

    NOTE(review): heavy obfuscation damage — every method is named
    `_UpperCAmelCase` (so later defs shadow earlier ones), most assignment
    targets were lost to `UpperCAmelCase__`, and several call arguments read
    `_lowercase`, which is undefined in the methods that use it.  The base
    `__SCREAMING_SNAKE_CASE` is presumably the TokenizerTesterMixin imported
    above — confirm.  Code is left byte-identical; notes flag the breakage.
    """

    # NOTE(review): three class attributes all bound to `A__` — presumably
    # `tokenizer_class`, `test_rust_tokenizer` and `test_sentencepiece`.
    A__= MarianTokenizer
    A__= False
    A__= True

    def _UpperCAmelCase ( self : Tuple ):
        """Create a tiny 9-token vocab + sentencepiece fixture tokenizer in tmpdirname."""
        super().setUp()
        UpperCAmelCase__ = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        # NOTE(review): `_lowercase` is undefined here — presumably the vocab list above
        UpperCAmelCase__ = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        UpperCAmelCase__ = Path(self.tmpdirname )
        # NOTE(review): `save_dir` is read below but its assignment target was lost
        save_json(_lowercase , save_dir / VOCAB_FILES_NAMES["vocab"] )
        save_json(_lowercase , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES["source_spm"] )
            copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES["target_spm"] )
        UpperCAmelCase__ = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def _UpperCAmelCase ( self : Dict , **_lowercase : Tuple ):
        """Build a tokenizer from the fixture directory."""
        return MarianTokenizer.from_pretrained(self.tmpdirname , **_lowercase )

    def _UpperCAmelCase ( self : Optional[Any] , _lowercase : Optional[int] ):
        """Return (input_text, expected_output_text) for the common round-trip test."""
        return (
            "This is a test",
            "This is a test",
        )

    def _UpperCAmelCase ( self : Optional[Any] ):
        """`</s>` must map to id 0 and back (see the fixture vocab in setUp)."""
        UpperCAmelCase__ = "</s>"
        UpperCAmelCase__ = 0
        # NOTE(review): `_lowercase` undefined — presumably the token / id above
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )

    def _UpperCAmelCase ( self : Optional[Any] ):
        """Vocab keeps insertion order; the fixture vocab has 9 entries."""
        UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "</s>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "<pad>" )
        self.assertEqual(len(_lowercase ) , 9 )

    def _UpperCAmelCase ( self : Optional[Any] ):
        """vocab_size reflects the 9-entry fixture vocab."""
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )

    def _UpperCAmelCase ( self : List[str] ):
        """Encode with a real hub checkpoint, then round-trip save/load it."""
        UpperCAmelCase__ = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""" )
        # NOTE(review): `en_de_tokenizer`/`batch` are read below but their
        # assignment targets were lost to `UpperCAmelCase__`
        UpperCAmelCase__ = en_de_tokenizer(["I am a small frog"] , return_tensors=_lowercase )
        self.assertIsInstance(_lowercase , _lowercase )
        UpperCAmelCase__ = [38, 1_21, 14, 6_97, 3_88_48, 0]
        self.assertListEqual(_lowercase , batch.input_ids[0] )
        UpperCAmelCase__ = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(_lowercase )
        UpperCAmelCase__ = [x.name for x in Path(_lowercase ).glob("*" )]
        self.assertIn("source.spm" , _lowercase )
        MarianTokenizer.from_pretrained(_lowercase )

    def _UpperCAmelCase ( self : Optional[int] ):
        """Outputs are padded/truncated to the model max length (512)."""
        UpperCAmelCase__ = self.get_tokenizer()
        UpperCAmelCase__ = tok(
            ["I am a small frog" * 10_00, "I am a small frog"] , padding=_lowercase , truncation=_lowercase , return_tensors=_lowercase )
        self.assertIsInstance(_lowercase , _lowercase )
        self.assertEqual(batch.input_ids.shape , (2, 5_12) )

    def _UpperCAmelCase ( self : str ):
        """Batch padding pads to the longest sequence in the batch (length 10)."""
        UpperCAmelCase__ = self.get_tokenizer()
        UpperCAmelCase__ = tok(["I am a tiny frog", "I am a small frog"] , padding=_lowercase , return_tensors=_lowercase )
        self.assertIsInstance(_lowercase , _lowercase )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )

    @slow
    def _UpperCAmelCase ( self : Optional[Any] ):
        """Full integration check of a fixed expected encoding (en-de checkpoint)."""
        # NOTE(review): the matching `# fmt: off` marker is missing in the obfuscated source
        UpperCAmelCase__ = {"input_ids": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )

    def _UpperCAmelCase ( self : Union[str, Any] ):
        """Separate source/target vocabs: text vs text_target encodings differ."""
        UpperCAmelCase__ = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
        UpperCAmelCase__ = "Tämä on testi"
        UpperCAmelCase__ = "This is a test"
        UpperCAmelCase__ = [76, 7, 20_47, 2]
        UpperCAmelCase__ = [69, 12, 11, 9_40, 2]
        # NOTE(review): `tokenizer` and the expected-id lists are read via
        # `_lowercase` below, but their assignment targets were lost
        UpperCAmelCase__ = tokenizer(_lowercase ).input_ids
        self.assertListEqual(_lowercase , _lowercase )
        UpperCAmelCase__ = tokenizer(text_target=_lowercase ).input_ids
        self.assertListEqual(_lowercase , _lowercase )
        UpperCAmelCase__ = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
        self.assertEqual(_lowercase , _lowercase )
| 475 | 0 |
'''simple docstring'''
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Generate and write the README.md model card for one FSMT WMT19 checkpoint.

    Args:
        model_card_dir: directory to create and write `README.md` into
            (name grounded by the call site at the bottom of this script).
        src_lang / tgt_lang: the language pair, e.g. "en" / "ru"; must form a
            key of `scores` below.

    The obfuscated source declared all three parameters as `A` (a
    SyntaxError); names are reconstructed from the keyword call site.
    """
    texts = {
        'en': 'Machine learning is great, isn\'t it?',
        'ru': 'Машинное обучение - это здорово, не так ли?',
        'de': 'Maschinelles Lernen ist großartig, oder?',
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
        'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
        'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
        'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
    }
    pair = f"""{src_lang}-{tgt_lang}"""
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    # the obfuscated source passed the path variable as `exist_ok` too;
    # exist_ok=True is the only sensible value for an idempotent generator
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f"""Generating {path}""")
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)
# make sure we are under the root of the project
# NOTE: names reconstructed from their own usages (`repo_dir /`,
# `model_cards_dir /`, and the call to write_model_card below); the
# trailing table-residue junk on the last line was removed.
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # model names look like "wmt19-<src>-<tgt>"
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: "LevitConfig", save_directory: "Path", push_to_hub: bool = True):
    """Port one timm LeViT checkpoint into `LevitForImageClassificationWithTeacher`.

    Copies the timm state dict tensor-by-tensor (both models enumerate their
    weights in the same order), verifies output parity on a random batch, and
    optionally saves model + image processor under `save_directory / name`.

    NOTE(review): parameter/local names reconstructed from an obfuscated
    source in which every parameter was named `A` (a SyntaxError);
    `pretrained=True` for timm is assumed — confirm.
    """
    print(f"""Converting {name}...""")

    with torch.no_grad():
        # pick the timm backbone matching the requested width
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # both state dicts list tensors in the same order, so map by position
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        # sanity-check numerical parity on a random batch
        x = torch.randn((2, 3, 224, 224))
        original_outputs = from_model(x)
        our_outputs = our_model(x).logits
        assert torch.allclose(original_outputs, our_outputs), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: "Path", model_name: str = None, push_to_hub: bool = True):
    """Convert one named LeViT checkpoint — or all of them when `model_name`
    is None — writing the results under `save_directory`.

    Returns (config, expected_shape).
    NOTE(review): on the single-`model_name` path `config` is never bound, so
    the final return raises NameError — this quirk exists in the original and
    is left unchanged to keep the all-models path behaviour identical.
    Local names were reconstructed from their usages (`idalabel`,
    `names_to_hidden_sizes`, `names_to_config` …) and the lone `int(A)` in
    the dict comprehension was restored to `int(k)`.
    """
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    expected_shape = (1, num_labels)

    # fetch the ImageNet-1k label mapping from the hub
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    # LevitConfig pre-bound with the classification head metadata
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, idalabel=idalabel, labelaid=labelaid)

    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }
    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    # CLI: convert one (or all) LeViT checkpoints into a dump folder.
    # NOTE: `parser`/`args` names reconstructed — the obfuscated source bound
    # both to `__A` while the following lines read `parser` / `args`; the
    # trailing table-residue junk on the last line was removed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )

    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# tokenizer boundary: split on anything that is not alphanumeric/underscore
# (constant names restored from their usages below: NON_ALPHA, MIN_NUM_TOKENS, NUM_PERM)
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 2_56
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a token list, or None when it is too short
    to be meaningfully compared (fewer than MIN_NUM_TOKENS tokens).

    (Def name restored from the call site in `_compute_min_hash`.)
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric boundaries, dropping blanks.

    (Def name restored from the call sites in `jaccard_similarity`.)
    """
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """MinHash-LSH index that incrementally groups near-duplicate code files.

    NOTE(review): reconstructed from an obfuscated source — the class was
    named `a` (call sites use `DuplicationIndex`) and all three methods
    shared the name `lowercase_`; method names restored from their call
    sites (`di.add`, `di.get_duplicate_clusters`).
    """

    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # cluster base key -> set of duplicate keys
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash) -> None:
        """Insert a file into the index, attaching it to an existing cluster
        when the LSH query finds close duplicates."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''')
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # no existing cluster matched: start one at the first close duplicate
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self):
        """Return clusters as lists of {"base_index", "repo_name", "path"} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """Dump the duplicate clusters to `filepath` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, """w""") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Worker: compute ((index, repo_name, path), MinHash) for one dataset row,
    or None implicitly when the file is too short.

    (Names restored from the tuple unpack and the dict keys used below.)
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """Yield (key, MinHash) pairs for a dataset, hashing rows in a process pool
    fed through a threaded prefetch queue."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=1_0_0_0_0), chunksize=1_0_0, ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    """Index every file's MinHash and return the resulting duplicate clusters.

    (Lost argument names of `di.add` restored from `_compute_min_hash`'s
    return shape: a (key, min_hash) pair.)
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=1_0_0)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(codea: str, codea_b: str) -> float:
    """Jaccard similarity of the token sets of two code snippets.

    BUG FIX: the obfuscated source intersected/united a token set with
    itself (`tokensa & tokensa`), which always yields 1.0; the two inputs
    must be tokenized into *separate* sets and compared.
    """
    tokens_a = get_tokens(codea)
    tokens_b = get_tokens(codea_b)
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
SCREAMING_SNAKE_CASE_ = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one duplicate cluster to its "extremes": a minimal subset such
    that every member is within `jaccard_threshold` of some kept extreme.
    Each kept extreme tracks how many members it represents in `"copies"`.

    Reads the dataset via the process-global `_shared_dataset`.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            # no existing extreme is close enough: keep this element
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run `_find_cluster_extremes_shared` over every cluster in a process
    pool, publishing the dataset through the module-global `_shared_dataset`
    so workers avoid re-pickling it per task."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85):
    """Remove near-duplicate files from `dataset`, keeping one "extreme" per
    duplicate neighbourhood, and annotate the clusters with is_extreme/copies.

    Returns (filtered_dataset, duplicate_clusters).
    NOTE(review): local names reconstructed from their usages; the broken
    filter lambda (two parameters both named `_lowercase`) was restored to
    the (example, index) form required by `with_indices=True`.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    # drop every duplicate that is not a kept extreme
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda example, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["""base_index"""]]["""copies"""]

    print(f'''Original dataset size: {len(dataset)}''')
    print(f'''Number of duplicate clusters: {len(duplicate_clusters)}''')
    print(f'''Files in duplicate cluster: {len(duplicate_indices)}''')
    print(f'''Unique files in duplicate cluster: {len(extreme_dict)}''')
    print(f'''Filtered dataset size: {len(ds_filter)}''')

    return ds_filter, duplicate_clusters
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
SCREAMING_SNAKE_CASE_ = ['text', 'image', 'audio']
def create_inputs(input_types: List[str]):
    """Build one dummy input per declared modality ("text"/"image"/"audio");
    nested lists of modalities recurse into nested input lists.

    (Def name restored from the recursive call and the mixin's call sites.)
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("""Text input""")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""")) / """000000039769.png""").resize((5_1_2, 5_1_2)))
        elif input_type == "audio":
            inputs.append(torch.ones(3_0_0_0))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f'''Invalid type requested: {input_type}''')
    return inputs
def output_types(outputs: List):
    """Classify each output value as "text", "image" or "audio" based on its
    (plain or agent-wrapped) runtime type."""
    output_types = []  # local list deliberately shadows the function name, as in the original
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("""text""")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("""image""")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("""audio""")
        else:
            raise ValueError(f'''Invalid output: {output}''')
    return output_types
@is_tool_test
class a :
    """Generic checks for agent tools; concrete test classes provide ``self.tool``.

    NOTE(review): obfuscation damage — all five methods are named
    ``lowercase_`` so only the last survives on the class (presumably they
    were distinct ``test_*`` methods); locals are bound to
    ``__UpperCAmelCase`` while later lines read the intended names
    (``inputs``, ``outputs``, ``_inputs``), and ``snake_case_`` appears as
    an undefined stand-in for several distinct values.  Code left
    byte-identical.
    """

    def lowercase_ ( self ):
        """Declared input/output modalities must all be authorized types."""
        self.assertTrue(hasattr(self.tool , """inputs""" ) )
        self.assertTrue(hasattr(self.tool , """outputs""" ) )
        __UpperCAmelCase: Optional[Any] = self.tool.inputs
        for _input in inputs:
            # NOTE(review): `snake_case_` undefined — presumably `list`
            if isinstance(_input , snake_case_ ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        __UpperCAmelCase: Dict = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )

    def lowercase_ ( self ):
        """Calling the tool on dummy inputs yields the declared output modalities."""
        __UpperCAmelCase: str = create_inputs(self.tool.inputs )
        __UpperCAmelCase: Any = self.tool(*snake_case_ )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            __UpperCAmelCase: Union[str, Any] = [outputs]
        self.assertListEqual(output_types(snake_case_ ) , self.tool.outputs )

    def lowercase_ ( self ):
        """Tools expose a description and a default checkpoint."""
        self.assertTrue(hasattr(self.tool , """description""" ) )
        self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
        self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )

    def lowercase_ ( self ):
        """Outputs are instances of the mapped agent types."""
        __UpperCAmelCase: Optional[int] = create_inputs(self.tool.inputs )
        __UpperCAmelCase: Optional[int] = self.tool(*snake_case_ )
        if not isinstance(snake_case_ , snake_case_ ):
            __UpperCAmelCase: Tuple = [outputs]
        self.assertEqual(len(snake_case_ ) , len(self.tool.outputs ) )
        for output, output_type in zip(snake_case_ , self.tool.outputs ):
            __UpperCAmelCase: List[Any] = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(snake_case_ , snake_case_ ) )

    def lowercase_ ( self ):
        """The tool also accepts agent-typed (wrapped) inputs."""
        __UpperCAmelCase: Optional[int] = create_inputs(self.tool.inputs )
        __UpperCAmelCase: Optional[int] = []
        for _input, input_type in zip(snake_case_ , self.tool.inputs ):
            if isinstance(snake_case_ , snake_case_ ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        __UpperCAmelCase: int = self.tool(*snake_case_ )
        if not isinstance(snake_case_ , snake_case_ ):
            __UpperCAmelCase: Union[str, Any] = [outputs]
        self.assertEqual(len(snake_case_ ) , len(self.tool.outputs ) )
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
__snake_case = TypeVar('''_T''')
class lowercase ( Generic[_T] ):
    """FIFO queue built from two LIFO stacks (amortised O(1) get).

    New items land on ``_stack1``; ``_stack2`` holds the front of the queue
    in reversed order and is refilled from ``_stack1`` only when it runs dry.

    NOTE(review): reconstructed from an obfuscated source in which both
    methods were named ``lowerCAmelCase__`` (the enqueue method was shadowed
    by the dequeue method) and both stacks collapsed into ``_stacka``;
    restored to the standard two-stack queue with ``put``/``get``.
    """

    def __init__( self , iterable = None ):
        """Optionally seed the queue from ``iterable`` (front = first element)."""
        self._stack1: list[_T] = list(iterable or [] )
        self._stack2: list[_T] = []

    def __len__( self ):
        return len(self._stack1 ) + len(self._stack2 )

    def __repr__( self ):
        # front-to-back view: reversed service stack, then the intake stack
        return F'''Queue({tuple(self._stack2[::-1] + self._stack1 )})'''

    def put( self , item ):
        """Append ``item`` at the back of the queue."""
        self._stack1.append(item )

    def get( self ):
        """Pop and return the front item; raise IndexError when empty."""
        # To reduce attribute look-ups inside the while loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop() )
        if not self._stack2:
            raise IndexError('''Queue is empty''' )
        return self._stack2.pop()
if __name__ == "__main__":
    # run the module's doctests when executed as a script
    from doctest import testmod

    testmod()
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase(BaseImageProcessor):
    r"""CLIP-style image processor: convert-RGB → resize → center-crop → rescale → normalize.

    NOTE(review): restored from an obfuscated version in which every local,
    attribute and method shared one name; names follow the upstream CLIP
    image processor (``preprocess`` calls ``self.resize`` etc., which fixes
    the method names).
    """

    # Name(s) of the tensor batch this processor produces, used by the base class.
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]`` (aspect kept)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean``/``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline; any ``None`` argument falls back to the instance default."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='''size''', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds a tiny BlipTextConfig plus random inputs for the TF model tests.

    NOTE(review): restored from an obfuscated version whose class name
    collided with the test class below; the test class references
    ``BlipTextModelTester``, which fixes this name.
    """

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) with a random mask prefix of 1s."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Each row attends to a random-length prefix and masks the rest.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(_a, unittest.TestCase):
    """Common TF model-test suite applied to TFBlipTextModel.

    NOTE(review): base ``_a`` is kept from the original line; presumably it is
    the TFModelTesterMixin imported above — verify against the file header.
    """

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason='''Blip does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''')
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search for ``target`` in ``array[left:right]``; return its index or -1.

    ``right`` is exclusive, matching ``range`` semantics.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int, precision: int = 10) -> int:
    """Iterative ternary search over a sorted ``array``; return index of ``target`` or -1.

    ``precision`` (keep it >= 1) is the window size below which we fall back
    to a plain linear scan.
    """
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            # Small window: a linear scan is cheaper and always safe.
            for i in range(left, right + 1):
                if array[i] == target:
                    return i
            return -1
        # Proper thirds of the *current* window; the previous
        # (left + right) // 3 + 1 formula indexed past the array once left > 0.
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int, precision: int = 10) -> int:
    """Recursive ternary search on sorted ``array`` within the inclusive range [left, right].

    Returns the index of ``target`` or -1.  The linear fallback now includes
    ``right`` itself; the previous version excluded the last index (and the
    strict ``left < right`` guard missed single-element ranges).
    """
    if left > right:
        return -1
    if right - left < precision:
        for i in range(left, right + 1):
            if array[i] == target:
                return i
        return -1
    one_third = left + (right - left) // 3
    two_third = right - (right - left) // 3
    if array[one_third] == target:
        return one_third
    if array[two_third] == target:
        return two_third
    if target < array[one_third]:
        return rec_ternary_search(left, one_third - 1, array, target, precision)
    if array[two_third] < target:
        return rec_ternary_search(two_third + 1, right, array, target, precision)
    return rec_ternary_search(one_third + 1, two_third - 1, array, target, precision)
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = input('''Enter numbers separated by comma:\n''').strip()
_lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_lowerCAmelCase : Any = int(input('''Enter the number to be found in the list:\n''').strip())
_lowerCAmelCase : Union[str, Any] = ite_ternary_search(collection, target)
_lowerCAmelCase : str = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print('''Not found''') | 46 | 0 |
from typing import TYPE_CHECKING

from ....utils import _LazyModule

# Submodule name -> public names, consumed by _LazyModule below.
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer is only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def lowerCAmelCase_ ( lowerCamelCase = "isbn/0140328726" ):
__magic_name__ : Optional[Any] =olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
__magic_name__ : Union[str, Any] =F"{olid} is not a valid Open Library olid"
raise ValueError(lowerCamelCase )
return requests.get(F"https://openlibrary.org/{new_olid}.json" ).json()
def summarize_book(ol_book_data: dict) -> dict:
    """Map a raw Open Library book record to a human-friendly summary dict."""
    desired_keys = {
        """title""": """Title""",
        """publish_date""": """Publish date""",
        """authors""": """Authors""",
        """number_of_pages""": """Number of pages:""",
        """first_sentence""": """First sentence""",
        """isbn_10""": """ISBN (10)""",
        """isbn_13""": """ISBN (13)""",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    # Each author entry only holds an olid reference; resolve it to a name.
    data["""Authors"""] = [
        get_openlibrary_data(author["""key"""])["""name"""] for author in data["""Authors"""]
    ]
    data["""First sentence"""] = data["""First sentence"""]["""value"""]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = """, """.join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
UpperCAmelCase_ : List[Any] = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
UpperCAmelCase_ : str = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print("\n".join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 367 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = GPTaTokenizer
__UpperCamelCase = GPTaTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = {'add_prefix_space': True}
__UpperCamelCase = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a__ : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
a__ : str = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
a__ : Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a__ : List[Any] = {'''unk_token''': '''<unk>'''}
a__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def __lowerCAmelCase ( self : Dict , **A__ : str ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def __lowerCAmelCase ( self : str , **A__ : str ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def __lowerCAmelCase ( self : int , A__ : Tuple ) -> List[Any]:
'''simple docstring'''
a__ : str = '''lower newer'''
a__ : List[Any] = '''lower newer'''
return input_text, output_text
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
a__ : int = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a__ : Optional[Any] = '''lower newer'''
a__ : str = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a__ : List[str] = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
a__ : List[Any] = tokens + [tokenizer.unk_token]
a__ : int = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__ : List[Any] = self.get_tokenizer()
a__ : Dict = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
a__ : Optional[Any] = '''lower newer'''
# Testing tokenization
a__ : Optional[int] = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
a__ : List[str] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
a__ : Any = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
a__ : str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
a__ : Tuple = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
a__ : int = tokenizer.encode(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
a__ : Optional[Any] = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing the unknown token
a__ : Any = tokens + [rust_tokenizer.unk_token]
a__ : int = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def __lowerCAmelCase ( self : Tuple , *A__ : Dict , **A__ : Any ) -> List[str]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Optional[Any] , A__ : List[Any]=1_5 ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
# Simple input
a__ : Union[str, Any] = '''This is a simple input'''
a__ : Optional[int] = ['''This is a simple input 1''', '''This is a simple input 2''']
a__ : Optional[int] = ('''This is a simple input''', '''This is a pair''')
a__ : Union[str, Any] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
a__ : str = '''This is a simple input'''
a__ : List[str] = ['''This is a simple input looooooooong''', '''This is a simple input''']
a__ : List[str] = ('''This is a simple input''', '''This is a pair''')
a__ : List[Any] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
a__ : Any = tokenizer.pad_token_id
a__ : List[str] = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' )
a__ : Optional[Any] = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='''np''' )
a__ : Optional[int] = tokenizer(*__lowerCAmelCase , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' )
a__ : int = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
a__ : List[Any] = '''$$$'''
a__ : str = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowerCAmelCase , add_bos_token=__lowerCAmelCase )
a__ : Union[str, Any] = '''This is a simple input'''
a__ : int = ['''This is a simple input 1''', '''This is a simple input 2''']
a__ : Dict = tokenizer.bos_token_id
a__ : List[str] = tokenizer(__lowerCAmelCase )
a__ : str = tokenizer(__lowerCAmelCase )
self.assertEqual(out_s.input_ids[0] , __lowerCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
a__ : Any = tokenizer.decode(out_s.input_ids )
a__ : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __lowerCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __lowerCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
a__ : Union[str, Any] = [self.get_tokenizer(do_lower_case=__lowerCAmelCase , add_bos_token=__lowerCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a__ : Optional[int] = '''Encode this.'''
a__ : Tuple = '''This one too please.'''
a__ : Union[str, Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
encoded_sequence += tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
a__ : int = tokenizer.encode_plus(
__lowerCAmelCase , __lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , )
a__ : Optional[Any] = encoded_sequence_dict['''input_ids''']
a__ : Optional[int] = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
a__ : int = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__lowerCAmelCase )
]
a__ : Optional[Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    """Regression tests for the OPT tokenizer (fast/slow parity, bos handling)."""

    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained('''facebook/opt-350m''', from_slow=True)
        text = '''A photo of a cat'''
        tokens_ids = tokenizer.encode(
            text, )
        self.assertEqual(tokens_ids, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
        # Round-trip through save/load must preserve the encoding.
        tokenizer.save_pretrained('''test_opt''')
        tokenizer = AutoTokenizer.from_pretrained('''./test_opt''')
        tokens_ids = tokenizer.encode(
            text, )
        self.assertEqual(tokens_ids, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained('''facebook/opt-350m''', use_slow=True)
        text = '''A photo of a cat'''
        tokens_ids = tokenizer.encode(
            text, )
        # Same as above
        self.assertEqual(tokens_ids, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])

    @unittest.skip('''This test is failing because of a bug in the fast tokenizer''')
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained('''facebook/opt-350m''', from_slow=True)
        tokenizer.bos_token = '''bos'''
        tokenizer.bos_token_id = tokenizer.get_vocab()['''bos''']
        text = '''A photo of a cat'''
        tokens_ids = tokenizer.encode(
            text, )
        # We changed the bos token
        self.assertEqual(tokens_ids, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
        tokenizer.save_pretrained('''./tok''')
        tokenizer = AutoTokenizer.from_pretrained('''./tok''')
        self.assertTrue(tokenizer.is_fast)
        tokens_ids = tokenizer.encode(
            text, )
        self.assertEqual(tokens_ids, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8])
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return a longest non-decreasing subsequence of ``array``.

    NOTE(review): restored from an obfuscated version where every local
    shared one name; the body's recursive calls already named
    ``longest_subsequence``, which fixes this function's name.
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            # Try the best subsequence that starts at the first element
            # smaller than the pivot.
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    # Candidate that keeps the pivot and recurses on the compatible tail.
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import argparse
import os
import re
__SCREAMING_SNAKE_CASE :Dict = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__SCREAMING_SNAKE_CASE :Tuple = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
__SCREAMING_SNAKE_CASE :List[str] = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def UpperCAmelCase_ ( fname , overwrite : bool = False ):
    """Alphabetically sort the entries of every auto-mapping ``OrderedDict`` in ``fname``.

    If ``overwrite`` is True the file is rewritten in place; otherwise the
    function returns ``True`` when the file *would* change (and ``None`` when
    the mappings are already sorted).
    """
    with open(fname , "r" , encoding="utf-8" ) as f:
        content = f.read()
    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            # Entries of a mapping appear 8 columns deeper than its introduction.
            indent = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    # Multi-line entry: gather every line up to the closing paren.
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda block : _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname , "w" , encoding="utf-8" ) as f:
            f.write("\n".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True


# Readable alias used by the batch helper below.
sort_auto_mapping = UpperCAmelCase_
def UpperCAmelCase_ ( __lowercase : bool = False , auto_module_path : str = '''src/transformers/models/auto''' ):
    """Run ``sort_auto_mapping`` on every ``.py`` file under ``auto_module_path``.

    When ``__lowercase`` (overwrite) is False, raise ``ValueError`` listing the
    files whose mappings are out of order instead of fixing them in place.
    """
    fnames = [os.path.join(auto_module_path , f ) for f in os.listdir(auto_module_path ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname , overwrite=__lowercase ) for fname in fnames]
    if not __lowercase and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            f'The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix'
            " this." )


# Readable alias used by the CLI guard below.
sort_all_auto_mappings = UpperCAmelCase_
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
    # --check_only means: report unsorted files, do not overwrite them.
    sort_all_auto_mappings(not args.check_only)
| 705 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A_ ( unittest.TestCase ):
    """Tests for ``BarkProcessor`` save/load, voice presets and tokenization.

    NOTE(review): identifiers in this class look machine-mangled.  All six
    methods share the name ``lowercase``, so each definition overwrites the
    previous one and unittest will only ever see the last binding (none are
    ``test_*``-named).  The first method assigns every fixture value to the
    throwaway local ``_UpperCAmelCase`` even though later methods read
    ``self.checkpoint``, ``self.tmpdirname``, ``self.voice_preset``,
    ``self.input_string``, ``self.speaker_embeddings_dict_path`` and
    ``self.speaker_embeddings_directory``, which are never set.  Several
    results are likewise bound to ``_UpperCAmelCase`` while later lines read
    names such as ``processor``/``tokenizer`` that are undefined as written.
    Restore the original ``setUp``/``get_tokenizer``/``tearDown``/``test_*``
    names and ``self.*`` assignments before relying on these tests.
    """
    # Presumably the original ``setUp``: checkpoint id, temp dir and fixtures.
    def lowercase ( self : str ):
        _UpperCAmelCase = "ylacombe/bark-small"
        _UpperCAmelCase = tempfile.mkdtemp()
        _UpperCAmelCase = "en_speaker_1"
        _UpperCAmelCase = "This is a test string"
        _UpperCAmelCase = "speaker_embeddings_path.json"
        _UpperCAmelCase = "speaker_embeddings"
    # Tokenizer factory forwarding keyword arguments to ``AutoTokenizer``.
    def lowercase ( self : Optional[int] , **snake_case_ : Optional[int] ):
        return AutoTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
    # Presumably the original ``tearDown``: drop the temporary directory.
    def lowercase ( self : Dict ):
        shutil.rmtree(self.tmpdirname )
    # Round-trip a default processor through save_pretrained/from_pretrained.
    def lowercase ( self : Dict ):
        _UpperCAmelCase = self.get_tokenizer()
        _UpperCAmelCase = BarkProcessor(tokenizer=snake_case_ )
        processor.save_pretrained(self.tmpdirname )
        _UpperCAmelCase = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    # Round-trip with speaker embeddings and extra tokenizer kwargs.
    @slow
    def lowercase ( self : Union[str, Any] ):
        _UpperCAmelCase = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        _UpperCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        _UpperCAmelCase = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    # Voice presets: pass-through dict, .npz file on disk, and hub lookup.
    def lowercase ( self : Union[str, Any] ):
        _UpperCAmelCase = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        _UpperCAmelCase = 3_5
        _UpperCAmelCase = 2
        _UpperCAmelCase = 8
        _UpperCAmelCase = {
            "semantic_prompt": np.ones(snake_case_ ),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        _UpperCAmelCase = processor(text=self.input_string , voice_preset=snake_case_ )
        _UpperCAmelCase = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(snake_case_ , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        _UpperCAmelCase = os.path.join(self.tmpdirname , "file.npz" )
        np.savez(snake_case_ , **snake_case_ )
        _UpperCAmelCase = processor(text=self.input_string , voice_preset=snake_case_ )
        _UpperCAmelCase = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(snake_case_ , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        _UpperCAmelCase = processor(text=self.input_string , voice_preset=self.voice_preset )
    # Processor tokenization should match the tokenizer called directly.
    def lowercase ( self : Any ):
        _UpperCAmelCase = self.get_tokenizer()
        _UpperCAmelCase = BarkProcessor(tokenizer=snake_case_ )
        _UpperCAmelCase = processor(text=self.input_string )
        _UpperCAmelCase = tokenizer(
            self.input_string , padding="max_length" , max_length=2_5_6 , add_special_tokens=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 119 | 0 |
# NOTE(review): every constant below is bound to the same mangled name ``__A``
# so each assignment immediately shadows the previous one — only the last
# frozenset survives at runtime.  The contents match diffusers-style pipeline
# parameter groups (text-to-image, image variation, inpainting, class
# conditioning, audio, ...); presumably the original names said so — confirm
# against the upstream file before depending on any binding here.
# Annotations corrected to the actual value type (``frozenset``).
# Text-guided image generation call params / batch params.
__A : frozenset = frozenset(
    [
        '''prompt''',
        '''height''',
        '''width''',
        '''guidance_scale''',
        '''negative_prompt''',
        '''prompt_embeds''',
        '''negative_prompt_embeds''',
        '''cross_attention_kwargs''',
    ]
)
__A : frozenset = frozenset(['''prompt''', '''negative_prompt'''])
__A : frozenset = frozenset([])
__A : frozenset = frozenset(['''image'''])
# Unconditional image-to-image params.
__A : frozenset = frozenset(
    [
        '''image''',
        '''height''',
        '''width''',
        '''guidance_scale''',
    ]
)
__A : frozenset = frozenset(['''image'''])
# Text-guided image variation params.
__A : frozenset = frozenset(
    [
        '''prompt''',
        '''image''',
        '''height''',
        '''width''',
        '''guidance_scale''',
        '''negative_prompt''',
        '''prompt_embeds''',
        '''negative_prompt_embeds''',
    ]
)
__A : frozenset = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
__A : frozenset = frozenset(
    [
        # Text guided image variation with an image mask
        '''prompt''',
        '''image''',
        '''mask_image''',
        '''height''',
        '''width''',
        '''guidance_scale''',
        '''negative_prompt''',
        '''prompt_embeds''',
        '''negative_prompt_embeds''',
    ]
)
__A : frozenset = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
__A : frozenset = frozenset(
    [
        # image variation with an image mask
        '''image''',
        '''mask_image''',
        '''height''',
        '''width''',
        '''guidance_scale''',
    ]
)
__A : frozenset = frozenset(['''image''', '''mask_image'''])
# Example-guided inpainting params.
__A : frozenset = frozenset(
    [
        '''example_image''',
        '''image''',
        '''mask_image''',
        '''height''',
        '''width''',
        '''guidance_scale''',
    ]
)
__A : frozenset = frozenset(['''example_image''', '''image''', '''mask_image'''])
# Class-conditioned generation params.
__A : frozenset = frozenset(['''class_labels'''])
__A : frozenset = frozenset(['''class_labels'''])
# Unconditional generation params.
__A : frozenset = frozenset(['''batch_size'''])
__A : frozenset = frozenset([])
__A : frozenset = frozenset(['''batch_size'''])
__A : frozenset = frozenset([])
# Text-to-audio params.
__A : frozenset = frozenset(
    [
        '''prompt''',
        '''audio_length_in_s''',
        '''guidance_scale''',
        '''negative_prompt''',
        '''prompt_embeds''',
        '''negative_prompt_embeds''',
        '''cross_attention_kwargs''',
    ]
)
__A : frozenset = frozenset(['''prompt''', '''negative_prompt'''])
# Token-to-audio generation params.
__A : frozenset = frozenset(['''input_tokens'''])
__A : frozenset = frozenset(['''input_tokens'''])
| 343 |
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> list:
    """Return every permutation of ``lowercase_`` as a list of tuples.

    Uses Heap's algorithm, permuting the input list in place while recording
    snapshots; the input list ends up reordered as a side effect.
    """
    if len(lowercase_) <= 1:
        return [tuple(lowercase_)]
    res = []

    def generate(k: int, arr: list) -> None:
        # Heap's algorithm: generate all permutations of the first k elements.
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k even: swap element i with the last of the prefix
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k odd: always swap the first element with the last
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(lowercase_), lowercase_)
    return res


# Conventional alias used by the CLI guard below.
heaps = lowercase__
if __name__ == "__main__":
    # Read a comma-separated list of integers and print all its permutations.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 624 | 0 |
"""simple docstring"""
class snake_case_ :
    """A menu item (knapsack candidate) with a name, a value and a weight."""

    def __init__( self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__( self ):
        return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''

    def get_value( self ):
        """Return the item's value."""
        return self.value

    def get_name( self ):
        """Return the item's name."""
        return self.name

    def get_weight( self ):
        """Return the item's weight."""
        return self.weight

    def value_weight( self ):
        """Return the value density (value per unit weight); assumes weight != 0."""
        return self.value / self.weight

    # Backward-compatible alias for the original (mangled) method name, whose
    # last binding was the density accessor.
    _UpperCAmelCase = value_weight


# Sibling helpers in this module refer to the class as ``Things``.
Things = snake_case_
def __lowerCamelCase ( name , value , weight ):
    """Build a list of ``snake_case_`` items from parallel sequences.

    ``name[i]``, ``value[i]`` and ``weight[i]`` describe the i-th item; the
    value and weight sequences must have at least ``len(name)`` entries.
    """
    menu = []
    for i in range(len(name)):
        menu.append(snake_case_(name[i], value[i], weight[i]))
    return menu


# Readable alias matching the helper's purpose.
build_menu = __lowerCamelCase
def __lowerCamelCase ( items , max_cost , key_func ):
    """Greedy knapsack approximation.

    Sort ``items`` in decreasing order of ``key_func`` and take each item that
    still fits under ``max_cost`` (items must expose ``get_weight`` and
    ``get_value``).  Returns ``(chosen_items, total_value)``.
    """
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for item in items_copy:
        if (total_cost + item.get_weight()) <= max_cost:
            result.append(item)
            total_cost += item.get_weight()
            total_value += item.get_value()
    return (result, total_value)


# Readable alias matching the helper's purpose.
greedy = __lowerCamelCase
def __lowerCamelCase ( ):
    # Intentional no-op: placeholder where a standalone test/demo of the
    # greedy solver above would go.
    pass
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 554 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
# Configure root logging once, at import time, for this preprocessing script.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
SCREAMING_SNAKE_CASE : logging.Logger = logging.getLogger(__name__)
# The script body below logs through the conventional name.
logger = SCREAMING_SNAKE_CASE
def __lowerCamelCase ( ):
    """Pre-tokenize a raw text file into token-id lists and pickle the result.

    Reads ``--file_path`` line by line, wraps each line in the tokenizer's
    BOS/SEP special tokens, encodes it, then dumps the shuffled id lists to
    ``<dump_file>.<tokenizer_name>.pickle``.
    """
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path' ,type=str ,default='data/dump.txt' ,help='The path to the data.' )
    parser.add_argument('--tokenizer_type' ,type=str ,default='bert' ,choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name' ,type=str ,default='bert-base-uncased' ,help='The tokenizer to use.' )
    parser.add_argument('--dump_file' ,type=str ,default='data/dump' ,help='The dump file prefix.' )
    args = parser.parse_args()
    logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token']  # `<s>`
        sep = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`
    logger.info(f'''Loading text from {args.file_path}''' )
    with open(args.file_path ,'r' ,encoding='utf8' ) as fp:
        data = fp.readlines()
    logger.info('Start encoding' )
    logger.info(f'''{len(data )} examples to process.''' )
    rslt = []
    processed = 0
    interval = 10000  # progress-logging period
    start = time.time()
    for text in data:
        # Add the special tokens manually, so encode() is told not to.
        text = f'''{bos} {text.strip()} {sep}'''
        token_ids = tokenizer.encode(text ,add_special_tokens=False )
        rslt.append(token_ids )
        processed += 1
        if processed % interval == 0:
            end = time.time()
            logger.info(f'''{processed} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            start = time.time()
    logger.info('Finished binarization' )
    logger.info(f'''{len(data )} examples processed.''' )
    dp_file = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    vocab_size = tokenizer.vocab_size
    # uint16 is enough when every token id fits below 2**16; otherwise int32.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f'''Dump to {dp_file}''' )
    with open(dp_file ,'wb' ) as handle:
        pickle.dump(rslt_ ,handle ,protocol=pickle.HIGHEST_PROTOCOL )


# Conventional alias for the ``__main__`` guard below.
main = __lowerCamelCase
if __name__ == "__main__":
    # Script entry point.  NOTE(review): ``main`` is not defined in this
    # module as written (the function above is named ``__lowerCamelCase``).
    main()
| 554 | 1 |
'''simple docstring'''
# Uppercase alphabet used by the Vigenère routines below (they index LETTERS).
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# Backward-compatible alias for the original (mangled) constant name.
lowercase_ = LETTERS
def lowerCAmelCase ():
    """Interactive Vigenère CLI.

    Prompts for a message, a key and a mode ('e'/'d'), then prints the
    encrypted or decrypted message.
    """
    message = input('''Enter message: ''')
    key = input('''Enter key [alphanumeric]: ''')
    mode = input('''Encrypt/Decrypt [e/d]: ''')
    if mode.lower().startswith('''e'''):
        mode = '''encrypt'''
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('''d'''):
        mode = '''decrypt'''
        translated = decrypt_message(key, message)
    else:
        # Fail fast instead of crashing later on an unbound variable.
        raise ValueError("Mode must start with 'e' or 'd'")
    print(F'''\n{mode.title()}ed message:''')
    print(translated)


# Conventional alias for the ``__main__`` guard at the bottom of the module.
main = lowerCAmelCase
def lowerCAmelCase (key, message):
    """Encrypt ``message`` with the Vigenère ``key``."""
    return translate_message(key, message, '''encrypt''')


# Named alias used by the CLI above.
encrypt_message = lowerCAmelCase
def lowerCAmelCase (key, message):
    """Decrypt ``message`` with the Vigenère ``key``."""
    return translate_message(key, message, '''decrypt''')


# Named alias used by the CLI above.
decrypt_message = lowerCAmelCase
def lowerCAmelCase (key, message, mode):
    """Shift each letter of ``message`` by the matching ``key`` letter.

    ``mode`` is ``"encrypt"`` (add the key letter's alphabet index) or
    ``"decrypt"`` (subtract it).  Non-alphabetic characters pass through
    unchanged and do not advance the key position; letter case is preserved.
    """
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            # Wrap around the alphabet.
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            # Advance (and cycle) the key only on letters.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


# Named alias used by the encrypt/decrypt wrappers above.
translate_message = lowerCAmelCase
if __name__ == "__main__":
    # Script entry point.  NOTE(review): ``main`` is not defined in this
    # module as written (every function above is named ``lowerCAmelCase``).
    main()
| 11 | """simple docstring"""
# Letter -> 5-character substitution table (Baconian-cipher variant).  Note
# that the codes for 'j', 'v' and 'w' differ from the classic Bacon alphabet;
# the table is preserved exactly as originally defined.
encode_dict = {
    'a': 'AAAAA',
    'b': 'AAAAB',
    'c': 'AAABA',
    'd': 'AAABB',
    'e': 'AABAA',
    'f': 'AABAB',
    'g': 'AABBA',
    'h': 'AABBB',
    'i': 'ABAAA',
    'j': 'BBBAA',
    'k': 'ABAAB',
    'l': 'ABABA',
    'm': 'ABABB',
    'n': 'ABBAA',
    'o': 'ABBAB',
    'p': 'ABBBA',
    'q': 'ABBBB',
    'r': 'BAAAA',
    's': 'BAAAB',
    't': 'BAABA',
    'u': 'BAABB',
    'v': 'BBBAB',
    'w': 'BABAA',
    'x': 'BABAB',
    'y': 'BABBA',
    'z': 'BABBB',
    ' ': ' ',
}
# Reverse table used by ``decode`` (the mapping above is one-to-one).
decode_dict = {value: key for key, value in encode_dict.items()}
# Backward-compatible alias for the original (mangled) module name, whose
# final binding was the decode table.
SCREAMING_SNAKE_CASE__ = decode_dict
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) -> str:
    """Encode a word: each letter or space becomes its 5-character code.

    Raises ``Exception`` on any character that is not a letter or a space.
    """
    encoded = ""
    for letter in SCREAMING_SNAKE_CASE_.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
    return encoded


# Readable alias matching the helper's purpose.
encode = UpperCamelCase
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) -> str:
    """Decode a coded string: every 5 characters map back to one letter.

    Words are separated by spaces; any character other than 'A', 'B' or a
    space raises ``Exception``.
    """
    if set(SCREAMING_SNAKE_CASE_ ) - {"A", "B", " "} != set():
        raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
    decoded = ""
    for word in SCREAMING_SNAKE_CASE_.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


# Readable alias matching the helper's purpose.
decode = UpperCamelCase
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    from doctest import testmod
    testmod()
| 434 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowercase ( __lowerCAmelCase ) -> float:
    """Return the arithmetic mean of a non-empty sequence of numbers.

    >>> _lowercase([3, 6, 9])
    6.0

    Raises:
        ValueError: if the sequence is empty.
    """
    if not __lowerCAmelCase:
        raise ValueError("""List is empty""" )
    return sum(__lowerCAmelCase ) / len(__lowerCAmelCase )


# Readable alias (the leading-underscore name is kept for existing callers).
mean = _lowercase
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 704 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger (transformers' logging wrapper).  NOTE(review): the variable
# name ``a`` is mangled and is immediately rebound by the next statement.
a = logging.get_logger(__name__)
# Map of checkpoint name -> hosted config URL.
a = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class __a (UpperCamelCase_ , UpperCamelCase_):
    """FocalNet model/backbone configuration.

    Stores image and patch geometry, embedding widths, per-stage depths and
    focal-modulation settings, then derives the backbone ``stage_names`` and
    the aligned ``out_features``/``out_indices`` pair.  Parameter names are
    restored from the attribute assignments of the original (mangled) body.
    """

    # Dispatch key used by the transformers config machinery.
    model_type = """focalnet"""
    # Backward-compatible alias for the original (mangled) class attribute.
    _SCREAMING_SNAKE_CASE = model_type

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1E-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Store every hyper-parameter on ``self`` and derive stage metadata."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # Backbone stage names: the stem plus one entry per depth stage.
        self.stage_names = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 12 | 0 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE (A ) -> list:
    """Sort ``A`` in place with odd-even transposition (brick) sort and return it.

    Each pass compares adjacent pairs starting alternately at index 0 and 1;
    after ``len(A)`` passes the list is sorted.
    """
    arr_size = len(A)
    for pass_index in range(arr_size):
        # Even passes compare (0,1), (2,3), ...; odd passes compare (1,2), (3,4), ...
        for i in range(pass_index % 2, arr_size - 1, 2):
            if A[i + 1] < A[i]:
                A[i], A[i + 1] = A[i + 1], A[i]
    return A


# Readable alias (used by the demo guard below).
odd_even_transposition = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Demo: start from a reversed range and show the sorted result.
    arr = list(range(10, 0, -1))
    print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 460 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
# Absolute tolerance used by the closeness checks in the tests below.
_A = 1e-4
# Import torch-dependent symbols only when torch is actually installed.
if is_torch_available():
    import torch
    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _lowerCamelCase :
    """Builds tiny Autoformer configs and random input batches for the tests below.

    NOTE(review): identifiers in this class look machine-mangled.  ``__init__``
    repeats the parameter name ``UpperCamelCase`` for every argument (a
    SyntaxError in Python) and binds each value to a throwaway local
    (``lowerCAmelCase__``) instead of the ``self.*`` attributes read by
    ``get_config``-style helpers; the helper methods all share the single name
    ``_lowerCAmelCase`` so only the last definition survives, and one of them
    calls ``self.prepare_autoformer_inputs_dict``, which is never defined under
    that name.  Restore the original names before executing.
    """
    def __init__( self : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str]=16 , UpperCamelCase : List[str]=13 , UpperCamelCase : Any=7 , UpperCamelCase : str=14 , UpperCamelCase : List[Any]=10 , UpperCamelCase : Any=19 , UpperCamelCase : Optional[int]=5 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=16 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : List[Any]=4 , UpperCamelCase : str=4 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : List[Any]=[1, 2, 3, 4, 5] , UpperCamelCase : str=25 , UpperCamelCase : Any=5 , ) -> List[Any]:
        """Record the tester hyper-parameters (see the class NOTE above)."""
        lowerCAmelCase__ : Optional[Any] = d_model
        lowerCAmelCase__ : Tuple = parent
        lowerCAmelCase__ : Optional[Any] = batch_size
        lowerCAmelCase__ : Dict = prediction_length
        lowerCAmelCase__ : Tuple = context_length
        lowerCAmelCase__ : Any = cardinality
        lowerCAmelCase__ : Any = num_time_features
        lowerCAmelCase__ : Tuple = lags_sequence
        lowerCAmelCase__ : Tuple = embedding_dimension
        lowerCAmelCase__ : str = is_training
        lowerCAmelCase__ : Union[str, Any] = hidden_size
        lowerCAmelCase__ : List[Any] = num_hidden_layers
        lowerCAmelCase__ : Any = num_attention_heads
        lowerCAmelCase__ : Dict = intermediate_size
        lowerCAmelCase__ : Union[str, Any] = hidden_act
        lowerCAmelCase__ : Tuple = hidden_dropout_prob
        lowerCAmelCase__ : List[Any] = attention_probs_dropout_prob
        lowerCAmelCase__ : int = context_length
        lowerCAmelCase__ : Union[str, Any] = prediction_length + label_length
        lowerCAmelCase__ : Optional[Any] = label_length
        lowerCAmelCase__ : Union[str, Any] = moving_average
        lowerCAmelCase__ : Any = autocorrelation_factor
    def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
        """Build a tiny ``AutoformerConfig`` from the tester settings."""
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def _lowerCAmelCase ( self : str , UpperCamelCase : Optional[int] ) -> str:
        """Create random past/future tensors shaped to match ``config``."""
        lowerCAmelCase__ : Dict = config.context_length + max(config.lags_sequence )
        lowerCAmelCase__ : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        lowerCAmelCase__ : Tuple = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        lowerCAmelCase__ : Dict = floats_tensor([self.batch_size, _past_length] )
        lowerCAmelCase__ : int = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        lowerCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        lowerCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length] )
        lowerCAmelCase__ : Optional[int] = {
            """past_values""": past_values,
            """static_categorical_features""": static_categorical_features,
            """past_time_features""": past_time_features,
            """past_observed_mask""": past_observed_mask,
            """future_time_features""": future_time_features,
            """future_values""": future_values,
        }
        return inputs_dict
    def _lowerCAmelCase ( self : int ) -> List[str]:
        """Return ``(config, inputs_dict)`` for the model tests."""
        lowerCAmelCase__ : Tuple = self.get_config()
        lowerCAmelCase__ : Optional[Any] = self.prepare_autoformer_inputs_dict(UpperCamelCase )
        return config, inputs_dict
    def _lowerCAmelCase ( self : str ) -> Any:
        """Same pair, under the name the common test mixin expects."""
        lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
        return config, inputs_dict
    def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Tuple ) -> Optional[Any]:
        """Check the standalone encoder/decoder reproduce the full model's
        hidden states after a save/load round-trip."""
        lowerCAmelCase__ : Union[str, Any] = AutoformerModel(config=UpperCamelCase ).to(UpperCamelCase ).eval()
        lowerCAmelCase__ : List[str] = model(**UpperCamelCase )
        lowerCAmelCase__ : Any = outputs.encoder_last_hidden_state
        lowerCAmelCase__ : Any = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCAmelCase__ : List[str] = model.get_encoder()
            encoder.save_pretrained(UpperCamelCase )
            lowerCAmelCase__ : Any = AutoformerEncoder.from_pretrained(UpperCamelCase ).to(UpperCamelCase )
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = model.create_network_inputs(**UpperCamelCase )
        lowerCAmelCase__ , lowerCAmelCase__ : Tuple = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        lowerCAmelCase__ : int = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        lowerCAmelCase__ : List[Any] = encoder(inputs_embeds=UpperCamelCase )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        lowerCAmelCase__ : Tuple = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        lowerCAmelCase__ : List[str] = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        lowerCAmelCase__ : Tuple = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        lowerCAmelCase__ : List[Any] = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCAmelCase__ : Optional[Any] = model.get_decoder()
            decoder.save_pretrained(UpperCamelCase )
            lowerCAmelCase__ : List[str] = AutoformerDecoder.from_pretrained(UpperCamelCase ).to(UpperCamelCase )
        lowerCAmelCase__ : Optional[Any] = decoder(
            trend=UpperCamelCase , inputs_embeds=UpperCamelCase , encoder_hidden_states=UpperCamelCase , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class _lowerCamelCase ( a_ , a_ , unittest.TestCase ):
_lowerCamelCase :Tuple = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_lowerCamelCase :int = (AutoformerForPrediction,) if is_torch_available() else ()
_lowerCamelCase :int = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_lowerCamelCase :Tuple = False
_lowerCamelCase :int = False
_lowerCamelCase :List[Any] = False
_lowerCamelCase :Optional[int] = False
_lowerCamelCase :int = False
_lowerCamelCase :Any = False
def _lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Tuple = AutoformerModelTester(self )
lowerCAmelCase__ : int = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase )
def _lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Dict = model_class(UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : int = model_class.from_pretrained(UpperCamelCase , output_loading_info=UpperCamelCase )
self.assertEqual(info["""missing_keys"""] , [] )
def _lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = inspect.signature(getattr(UpperCamelCase , """forward""" ) )
# The main input is the name of the argument after `self`
lowerCAmelCase__ : str = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCamelCase )
def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = model_class(UpperCamelCase )
lowerCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : List[str] = [*signature.parameters.keys()]
lowerCAmelCase__ : str = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(UpperCamelCase )] , UpperCamelCase )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : Optional[int] = getattr(self.model_tester , """seq_length""" , UpperCamelCase )
lowerCAmelCase__ : List[str] = getattr(self.model_tester , """decoder_seq_length""" , UpperCamelCase )
lowerCAmelCase__ : Tuple = getattr(self.model_tester , """encoder_seq_length""" , UpperCamelCase )
lowerCAmelCase__ : List[str] = getattr(self.model_tester , """d_model""" , UpperCamelCase )
lowerCAmelCase__ : Any = getattr(self.model_tester , """num_attention_heads""" , UpperCamelCase )
lowerCAmelCase__ : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Any = False
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : Dict = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Tuple = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : str = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowerCAmelCase__ : int = len(UpperCamelCase )
lowerCAmelCase__ : int = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase , UpperCamelCase )
# decoder attentions
lowerCAmelCase__ : List[str] = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase , (list, tuple) )
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowerCAmelCase__ : int = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase , (list, tuple) )
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowerCAmelCase__ : int = True
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Dict = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase ) )
lowerCAmelCase__ : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
    @is_flaky()
    def _lowerCAmelCase ( self : int ) -> Tuple:
        """Re-run the shared retain-grad test from the mixin; decorated with
        ``@is_flaky()`` so occasional gradient-retention failures are retried
        instead of failing the suite outright."""
        super().test_retain_grad_hidden_states_attentions()
def lowercase_(__UpperCAmelCase="train-batch.pt"):
    """Download a pickled Autoformer test batch from the Hugging Face Hub and load it.

    Args:
        __UpperCAmelCase: filename of the batch inside the
            ``hf-internal-testing/tourism-monthly-batch`` dataset repo.

    Returns:
        The deserialized batch object loaded via ``torch.load``.
    """
    file_path = hf_hub_download(
        repo_id="hf-internal-testing/tourism-monthly-batch",
        filename=__UpperCAmelCase,
        repo_type="dataset",
    )
    # Fix: load the *downloaded* file path (previously the bare filename was
    # loaded and the filename string was passed as map_location, which is
    # invalid). NOTE(review): upstream used map_location=torch_device; that
    # name is not in scope here, so tensors are mapped to CPU — confirm.
    batch = torch.load(file_path, map_location="cpu")
    return batch


# Backward-compatible alias: the test methods below call `prepare_batch(...)`.
prepare_batch = lowercase_
@require_torch
@slow
# NOTE(review): this class was machine-obfuscated — every local is rebound to
# `lowerCAmelCase__` (so names like `model`, `batch`, `output` referenced below
# are undefined), and `UpperCamelCase` stands in for several different original
# values (the torch device, the expected shape/slice, and the tolerance).
# Restore the original bindings before running.
class _lowerCamelCase ( unittest.TestCase ):
    def _lowerCAmelCase ( self : Tuple ) -> int:
        """Integration check: forward pass of the pretrained Autoformer model on a
        real tourism-monthly batch; verifies output shape and a 3x3 logit slice."""
        lowerCAmelCase__ : int = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCamelCase )
        lowerCAmelCase__ : List[str] = prepare_batch()
        with torch.no_grad():
            lowerCAmelCase__ : Dict = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
        lowerCAmelCase__ : str = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , UpperCamelCase )
        lowerCAmelCase__ : Optional[int] = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCamelCase )
        self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
    def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
        """Integration check: encoder-only pass of AutoformerForPrediction on the
        validation batch; verifies encoder_last_hidden_state shape and a slice."""
        lowerCAmelCase__ : List[str] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCamelCase )
        lowerCAmelCase__ : List[Any] = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            lowerCAmelCase__ : Dict = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
        lowerCAmelCase__ : int = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , UpperCamelCase )
        lowerCAmelCase__ : Union[str, Any] = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCamelCase )
        self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
    def _lowerCAmelCase ( self : str ) -> int:
        """Integration check: `generate` on the validation batch; verifies the
        sampled-sequence shape and the mean prediction of the last 3 steps."""
        lowerCAmelCase__ : Tuple = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCamelCase )
        lowerCAmelCase__ : str = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            lowerCAmelCase__ : Optional[int] = model.generate(
                static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
        lowerCAmelCase__ : Optional[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , UpperCamelCase )
        lowerCAmelCase__ : int = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase )
        lowerCAmelCase__ : List[Any] = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase , rtol=1E-1 ) )
| 299 | 0 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
lowercase =logging.getLogger(__name__)
def accuracy(out, labels):
    """Count how many argmax predictions match the labels.

    Args:
        out: array of per-example class scores, shape ``(n, num_classes)``.
        labels: array of integer class labels, shape ``(n,)``.

    Returns:
        The number of correct predictions (a numpy integer).
    """
    # Fix: the obfuscated version declared two parameters with the same name
    # (a SyntaxError) and referenced undefined `outputs`/`labels` locals.
    preds = np.argmax(out, axis=1)
    return np.sum(preds == labels)


# Backward-compat alias for the obfuscated name previously bound here.
lowerCamelCase__ = accuracy
def load_rocstories_dataset(dataset_path):
    """Parse a ROCStories cloze-test CSV into training tuples.

    Args:
        dataset_path: path to the CSV file (UTF-8 encoded).

    Returns:
        List of ``(story, cont1, cont2, mc_label)`` tuples, where `story` is
        the four context sentences joined with spaces and `mc_label` is the
        0-based index of the correct continuation.
    """
    # Fix: the obfuscated version applied csv.reader / next / the loop to the
    # *path string* instead of the open file handle, and appended to an
    # undefined `output` name.
    with open(dataset_path, encoding='utf_8') as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the header line
        for line in tqdm(reader):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


# Backward-compat alias for the obfuscated name previously bound here.
lowerCamelCase__ = load_rocstories_dataset
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pack encoded ROCStories examples into fixed-size model input tensors.

    Each example contributes two candidate sequences
    ``[start] story[:cap] [delim] cont_i[:cap] [clf]`` (one per continuation).

    Args:
        encoded_datasets: iterable of datasets, each a list of
            ``(story, cont1, cont2, mc_label)`` token-id tuples.
        input_len: padded sequence length of the output tensors.
        cap_length: maximum tokens kept from the story / each continuation.
        start_token, delimiter_token, clf_token: special token ids.

    Returns:
        List of ``(input_ids, mc_token_ids, lm_labels, mc_labels)`` tensor
        tuples, one per input dataset.
    """
    # Fix: the obfuscated version collapsed all indexed array assignments into
    # rebindings of a single local, so the arrays were never filled and the
    # names used to build the returned tuple were undefined.
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        # -100 is the ignore index for the LM loss on padding positions.
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # The classification head reads the hidden state at the clf token.
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


# Backward-compat alias for the obfuscated name previously bound here.
lowerCamelCase__ = pre_process_datasets
# NOTE(review): this function was machine-obfuscated — every local is rebound
# to `_UpperCAmelCase`, so many names referenced later (args, device, model,
# tokenizer, train_dataloader, tr_loss, exp_average_loss, eval counters, ...)
# are undefined in this text. The comments below describe the intended flow;
# the original bindings must be restored before this can run.
def lowerCamelCase__ ( ):
    """CLI entry point: fine-tune and/or evaluate an OpenAI GPT double-heads
    model on the ROCStories cloze task (train loop + accuracy evaluation)."""
    # ---- argument parsing -------------------------------------------------
    _UpperCAmelCase : str =argparse.ArgumentParser()
    parser.add_argument('--model_name' , type=__lowerCamelCase , default='openai-gpt' , help='pretrained model name' )
    parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
    parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
    parser.add_argument(
        '--output_dir' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
    parser.add_argument('--train_dataset' , type=__lowerCamelCase , default='' )
    parser.add_argument('--eval_dataset' , type=__lowerCamelCase , default='' )
    parser.add_argument('--seed' , type=__lowerCamelCase , default=4_2 )
    parser.add_argument('--num_train_epochs' , type=__lowerCamelCase , default=3 )
    parser.add_argument('--train_batch_size' , type=__lowerCamelCase , default=8 )
    parser.add_argument('--eval_batch_size' , type=__lowerCamelCase , default=1_6 )
    parser.add_argument('--adam_epsilon' , default=1e-8 , type=__lowerCamelCase , help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm' , type=__lowerCamelCase , default=1 )
    parser.add_argument(
        '--max_steps' , default=-1 , type=__lowerCamelCase , help=(
            'If > 0: set total number of training steps to perform. Override num_train_epochs.'
        ) , )
    parser.add_argument(
        '--gradient_accumulation_steps' , type=__lowerCamelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
    parser.add_argument('--learning_rate' , type=__lowerCamelCase , default=6.25e-5 )
    parser.add_argument('--warmup_steps' , default=0 , type=__lowerCamelCase , help='Linear warmup over warmup_steps.' )
    parser.add_argument('--lr_schedule' , type=__lowerCamelCase , default='warmup_linear' )
    parser.add_argument('--weight_decay' , type=__lowerCamelCase , default=0.01 )
    parser.add_argument('--lm_coef' , type=__lowerCamelCase , default=0.9 )
    parser.add_argument('--n_valid' , type=__lowerCamelCase , default=3_7_4 )
    parser.add_argument('--server_ip' , type=__lowerCamelCase , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=__lowerCamelCase , default='' , help='Can be used for distant debugging.' )
    _UpperCAmelCase : Optional[Any] =parser.parse_args()
    # NOTE(review): presumably `print(args)` originally.
    print(__lowerCamelCase )
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase )
        ptvsd.wait_for_attach()
    # ---- reproducibility + device selection -------------------------------
    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )
    _UpperCAmelCase : Any =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    _UpperCAmelCase : Tuple =torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(__lowerCamelCase , __lowerCamelCase ) )
    if not args.do_train and not args.do_eval:
        raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )
    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    _UpperCAmelCase : List[str] =['_start_', '_delimiter_', '_classify_']
    _UpperCAmelCase : Any =OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(__lowerCamelCase )
    _UpperCAmelCase : str =tokenizer.convert_tokens_to_ids(__lowerCamelCase )
    _UpperCAmelCase : Optional[Any] =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(__lowerCamelCase ) )
    model.to(__lowerCamelCase )
    # Load and encode the datasets
    def tokenize_and_encode(__lowerCamelCase : Dict ):
        # Recursively tokenize: strings -> ids, ints pass through, lists recurse.
        if isinstance(__lowerCamelCase , __lowerCamelCase ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__lowerCamelCase ) )
        elif isinstance(__lowerCamelCase , __lowerCamelCase ):
            return obj
        return [tokenize_and_encode(__lowerCamelCase ) for o in obj]
    logger.info('Encoding dataset...' )
    _UpperCAmelCase : int =load_rocstories_dataset(args.train_dataset )
    _UpperCAmelCase : Dict =load_rocstories_dataset(args.eval_dataset )
    _UpperCAmelCase : Optional[Any] =(train_dataset, eval_dataset)
    _UpperCAmelCase : Dict =tokenize_and_encode(__lowerCamelCase )
    # Compute the max input length for the Transformer
    _UpperCAmelCase : int =model.config.n_positions // 2 - 2
    _UpperCAmelCase : str =max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset )
    _UpperCAmelCase : Tuple =min(__lowerCamelCase , model.config.n_positions ) # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    _UpperCAmelCase : Any =pre_process_datasets(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , *__lowerCamelCase )
    # NOTE(review): originally a tuple unpack:
    # `train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]`.
    _UpperCAmelCase : str =tensor_datasets[0], tensor_datasets[1]
    _UpperCAmelCase : int =TensorDataset(*__lowerCamelCase )
    _UpperCAmelCase : Optional[int] =RandomSampler(__lowerCamelCase )
    _UpperCAmelCase : Optional[Any] =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.train_batch_size )
    _UpperCAmelCase : Dict =TensorDataset(*__lowerCamelCase )
    _UpperCAmelCase : str =SequentialSampler(__lowerCamelCase )
    _UpperCAmelCase : int =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.eval_batch_size )
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            _UpperCAmelCase : List[Any] =args.max_steps
            _UpperCAmelCase : Tuple =args.max_steps // (len(__lowerCamelCase ) // args.gradient_accumulation_steps) + 1
        else:
            _UpperCAmelCase : int =len(__lowerCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
        # Weight decay is applied to all parameters except biases and LayerNorm.
        _UpperCAmelCase : int =list(model.named_parameters() )
        _UpperCAmelCase : Optional[int] =['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        _UpperCAmelCase : int =[
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
        ]
        _UpperCAmelCase : Optional[Any] =AdamW(__lowerCamelCase , lr=args.learning_rate , eps=args.adam_epsilon )
        _UpperCAmelCase : Dict =get_linear_schedule_with_warmup(
            __lowerCamelCase , num_warmup_steps=args.warmup_steps , num_training_steps=__lowerCamelCase )
    # ---- training loop ----------------------------------------------------
    if args.do_train:
        # NOTE(review): originally `tr_loss, nb_tr_steps, exp_average_loss = 0, 0, None`.
        _UpperCAmelCase : Dict =0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
            _UpperCAmelCase : Union[str, Any] =0
            _UpperCAmelCase : Tuple =0
            _UpperCAmelCase : Any =tqdm(__lowerCamelCase , desc='Training' )
            for step, batch in enumerate(__lowerCamelCase ):
                _UpperCAmelCase : List[str] =tuple(t.to(__lowerCamelCase ) for t in batch )
                # NOTE(review): originally unpacked to input_ids, mc_token_ids, lm_labels, mc_labels.
                _UpperCAmelCase : Optional[Any] =batch
                _UpperCAmelCase : List[str] =model(__lowerCamelCase , mc_token_ids=__lowerCamelCase , lm_labels=__lowerCamelCase , mc_labels=__lowerCamelCase )
                # Combined loss: lm_coef-weighted LM loss + multiple-choice loss.
                _UpperCAmelCase : List[str] =args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss for the progress bar.
                _UpperCAmelCase : Union[str, Any] =(
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                _UpperCAmelCase : str ='Training loss: {:.2e} lr: {:.2e}'.format(__lowerCamelCase , scheduler.get_lr()[0] )
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        _UpperCAmelCase : Dict =model.module if hasattr(__lowerCamelCase , 'module' ) else model # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        _UpperCAmelCase : List[str] =os.path.join(args.output_dir , __lowerCamelCase )
        _UpperCAmelCase : Optional[int] =os.path.join(args.output_dir , __lowerCamelCase )
        torch.save(model_to_save.state_dict() , __lowerCamelCase )
        model_to_save.config.to_json_file(__lowerCamelCase )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        _UpperCAmelCase : int =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        _UpperCAmelCase : Optional[int] =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(__lowerCamelCase )
    # ---- evaluation loop --------------------------------------------------
    if args.do_eval:
        model.eval()
        # NOTE(review): originally `eval_loss, eval_accuracy = 0, 0` and
        # `nb_eval_steps, nb_eval_examples = 0, 0`.
        _UpperCAmelCase : Optional[Any] =0, 0
        _UpperCAmelCase : Dict =0, 0
        for batch in tqdm(__lowerCamelCase , desc='Evaluating' ):
            _UpperCAmelCase : str =tuple(t.to(__lowerCamelCase ) for t in batch )
            _UpperCAmelCase : Dict =batch
            with torch.no_grad():
                _UpperCAmelCase : Dict =model(
                    __lowerCamelCase , mc_token_ids=__lowerCamelCase , lm_labels=__lowerCamelCase , mc_labels=__lowerCamelCase )
            _UpperCAmelCase : str =mc_logits.detach().cpu().numpy()
            _UpperCAmelCase : Optional[Any] =mc_labels.to('cpu' ).numpy()
            _UpperCAmelCase : Tuple =accuracy(__lowerCamelCase , __lowerCamelCase )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        _UpperCAmelCase : List[Any] =eval_loss / nb_eval_steps
        _UpperCAmelCase : Union[str, Any] =eval_accuracy / nb_eval_examples
        _UpperCAmelCase : Optional[Any] =tr_loss / nb_tr_steps if args.do_train else None
        _UpperCAmelCase : Optional[Any] ={'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
        _UpperCAmelCase : Optional[Any] =os.path.join(args.output_dir , 'eval_results.txt' )
        with open(__lowerCamelCase , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key in sorted(result.keys() ):
                logger.info(' %s = %s' , __lowerCamelCase , str(result[key] ) )
                writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
    # NOTE(review): `main` is undefined in this file — presumably the function
    # above (obfuscated to `lowerCamelCase__`) was originally named `main`.
    main()
| 714 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
lowercase =logging.get_logger(__name__)
lowercase ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
lowercase ={
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
lowercase ={
'RUCAIBox/mvp': 1024,
}
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =VOCAB_FILES_NAMES
UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase =["input_ids", "attention_mask"]
UpperCAmelCase =MvpTokenizer
def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , snake_case=True , **snake_case , ) -> str:
'''simple docstring'''
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
_UpperCAmelCase : Union[str, Any] =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , snake_case) != add_prefix_space:
_UpperCAmelCase : List[str] =getattr(snake_case , pre_tok_state.pop('type'))
_UpperCAmelCase : Union[str, Any] =add_prefix_space
_UpperCAmelCase : Optional[Any] =pre_tok_class(**snake_case)
_UpperCAmelCase : Union[str, Any] =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_UpperCAmelCase : List[Any] ='post_processor'
_UpperCAmelCase : Optional[int] =getattr(self.backend_tokenizer , snake_case , snake_case)
if tokenizer_component_instance:
_UpperCAmelCase : int =json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase : Any =tuple(state['sep'])
if "cls" in state:
_UpperCAmelCase : List[str] =tuple(state['cls'])
_UpperCAmelCase : str =False
if state.get('add_prefix_space' , snake_case) != add_prefix_space:
_UpperCAmelCase : List[str] =add_prefix_space
_UpperCAmelCase : Optional[int] =True
if state.get('trim_offsets' , snake_case) != trim_offsets:
_UpperCAmelCase : Union[str, Any] =trim_offsets
_UpperCAmelCase : Tuple =True
if changes_to_apply:
_UpperCAmelCase : str =getattr(snake_case , state.pop('type'))
_UpperCAmelCase : List[Any] =component_class(**snake_case)
setattr(self.backend_tokenizer , snake_case , snake_case)
@property
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def lowerCAmelCase ( self , snake_case) -> Dict:
'''simple docstring'''
_UpperCAmelCase : List[Any] =AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case) if isinstance(snake_case , snake_case) else value
_UpperCAmelCase : Any =value
def lowerCAmelCase ( self , *snake_case , **snake_case) -> BatchEncoding:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =kwargs.get('is_split_into_words' , snake_case)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*snake_case , **snake_case)
def lowerCAmelCase ( self , *snake_case , **snake_case) -> BatchEncoding:
'''simple docstring'''
_UpperCAmelCase : Any =kwargs.get('is_split_into_words' , snake_case)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.')
return super()._encode_plus(*snake_case , **snake_case)
def lowerCAmelCase ( self , snake_case , snake_case = None) -> Tuple[str]:
'''simple docstring'''
_UpperCAmelCase : str =self._tokenizer.model.save(snake_case , name=snake_case)
return tuple(snake_case)
def lowerCAmelCase ( self , snake_case , snake_case=None) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , snake_case , snake_case = None) -> List[int]:
'''simple docstring'''
_UpperCAmelCase : List[str] =[self.sep_token_id]
_UpperCAmelCase : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 331 | 0 |
import warnings


# Deprecation shim: this module was moved to `utils.memory`; importing it only
# emits a FutureWarning pointing at the new location.
# Fix: the function name in the first sentence was misspelled
# (`find_executable_batchsize`) — the actual API is `find_executable_batch_size`,
# as the import example in the same message shows.
warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
| 100 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : int ) -> str:
super().__init__()
__snake_case = module
__snake_case = nn.Sequential(
nn.Linear(module.in_features , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) , nn.Linear(SCREAMING_SNAKE_CASE_ , module.out_features , bias=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
return self.module(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) + self.adapter(SCREAMING_SNAKE_CASE_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
# NOTE(review): obfuscation collapsed the distinct class constants
# (model_name, EXPECTED_RELATIVE_DIFFERENCE, input_text, EXPECTED_OUTPUTS,
# MAX_NEW_TOKENS — names the subclasses below read via `self.`) onto the single
# name `_SCREAMING_SNAKE_CASE`, so each assignment shadows the previous one and
# the `EXPECTED_OUTPUTS.add(...)` calls reference an undefined name. Restore
# the original attribute names before running.
class _lowercase ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    _SCREAMING_SNAKE_CASE : Tuple = "bigscience/bloom-1b7"
    # Constant values
    _SCREAMING_SNAKE_CASE : Union[str, Any] = 2.109659552692574
    _SCREAMING_SNAKE_CASE : Optional[Any] = "Hello my name is"
    _SCREAMING_SNAKE_CASE : List[str] = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
    _SCREAMING_SNAKE_CASE : Dict = 1_0
    def a ( self : Optional[Any] ) -> List[Any]:
        """Shared setUp: load the tokenizer for the test model."""
        # Models and tokenizer
        __snake_case = AutoTokenizer.from_pretrained(self.model_name )
# NOTE(review): obfuscation rebinds every local (and the setUp fixtures) to
# `__snake_case`, so names used later (self.model_fpaa, self.model_abit,
# encoded_input, output_sequences, ...) are undefined in this text, and the
# bare `SCREAMING_SNAKE_CASE_` arguments stand in for literal values
# (e.g. `load_in_4bit=True`). Restore the original bindings before running.
class _lowercase ( __lowercase ):
    def a ( self : Union[str, Any] ) -> List[str]:
        """setUp: load an fp16 reference model and a 4-bit quantized model."""
        super().setUp()
        # Models and tokenizer
        __snake_case = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map='auto' )
        __snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
    def a ( self : Optional[Any] ) -> Any:
        """tearDown: free both models and flush the CUDA allocator."""
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def a ( self : Optional[Any] ) -> int:
        """The quantized model's config must carry a serializable quantization_config."""
        __snake_case = self.model_abit.config
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
        __snake_case = config.to_dict()
        __snake_case = config.to_diff_dict()
        __snake_case = config.to_json_string()
    def a ( self : Optional[Any] ) -> str:
        """4-bit quantization must shrink the memory footprint by the expected ratio."""
        from bitsandbytes.nn import Paramsabit
        __snake_case = self.model_fpaa.get_memory_footprint()
        __snake_case = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        __snake_case = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )
    def a ( self : Union[str, Any] ) -> Optional[Any]:
        """All quantized Linear weights (except kept-in-fp32 modules) are packed uint8."""
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )
    def a ( self : Union[str, Any] ) -> int:
        """Generation from the 4-bit model must produce one of the known outputs."""
        __snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
        __snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
    def a ( self : Optional[Any] ) -> Dict:
        """Loading via an explicit BitsAndBytesConfig must behave the same."""
        __snake_case = BitsAndBytesConfig()
        __snake_case = True
        __snake_case = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
        __snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
        __snake_case = model_abit_from_config.generate(
            input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
    def a ( self : List[Any] ) -> str:
        """Serializing a 4-bit model is unsupported and must raise."""
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
    def a ( self : Any ) -> Union[str, Any]:
        """Passing both a quantization_config and load_in_4bit must raise."""
        __snake_case = BitsAndBytesConfig()
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            __snake_case = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
    def a ( self : Tuple ) -> Dict:
        """Device/dtype conversions on a quantized model must raise; the fp16
        reference model must still convert freely."""
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries with `str`
            self.model_abit.to('cpu' )
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries with a `device`
            self.model_abit.to(torch.device('cuda:0' ) )
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        __snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
        __snake_case = self.model_fpaa.to(torch.floataa )
        __snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        __snake_case = self.model_fpaa.to('cpu' )
        # Check this does not throw an error
        __snake_case = self.model_fpaa.half()
        # Check this does not throw an error
        __snake_case = self.model_fpaa.float()
    def a ( self : Tuple ) -> Union[str, Any]:
        """fp32-kept modules (T5 wo) must stay fp32 after 4-bit loading."""
        __snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
# NOTE(review): same obfuscation caveats as the class above — `__snake_case`
# rebinds every fixture/local, and `SCREAMING_SNAKE_CASE_` stands in for
# literal argument values (e.g. `load_in_4bit=True`).
class _lowercase ( unittest.TestCase ):
    @classmethod
    def a ( cls : Union[str, Any] ) -> Dict:
        """Class-level fixtures: T5 model names, tokenizer, and a test prompt."""
        __snake_case = 't5-small'
        __snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
        __snake_case = AutoTokenizer.from_pretrained(cls.model_name )
        __snake_case = 'Translate in German: Hello, my dog is cute'
    def a ( self : List[Any] ) -> str:
        """tearDown: flush the CUDA allocator between tests."""
        gc.collect()
        torch.cuda.empty_cache()
    def a ( self : int ) -> Optional[Any]:
        """T5 must load and generate in 4-bit even with _keep_in_fp32_modules
        disabled (temporarily set to None and restored at the end)."""
        from transformers import TaForConditionalGeneration
        __snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
        __snake_case = None
        # test with `t5-small`
        __snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
        __snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        __snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
        # test with `flan-t5-small`
        __snake_case = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
        __snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        __snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
        __snake_case = modules
    def a ( self : List[str] ) -> Any:
        """Standard 4-bit T5 load: decoder attention projections are quantized
        and generation works for both T5 variants."""
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        __snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        __snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        __snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
        # test with `flan-t5-small`
        __snake_case = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
        __snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        __snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# NOTE(review): same obfuscation caveats — fixtures collapsed onto
# `__snake_case` so the attributes read in tearDown/tests are undefined here.
class _lowercase ( __lowercase ):
    def a ( self : Dict ) -> str:
        """setUp: load 4-bit models through several Auto* heads (base,
        sequence classification, causal LM, seq2seq)."""
        super().setUp()
        # model_name
        __snake_case = 'bigscience/bloom-560m'
        __snake_case = 't5-small'
        # Different types of model
        __snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
        # Sequence classification model
        __snake_case = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
        # CausalLM model
        __snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
        # Seq2seq model
        __snake_case = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
    def a ( self : int ) -> Dict:
        """tearDown: free all loaded models and flush the CUDA allocator."""
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
    def a ( self : Any ) -> Optional[Any]:
        """Quantization applies to the backbone only: heads stay nn.Parameter."""
        from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( __lowercase ):
    """`pipeline()`-level 4-bit generation smoke test.

    NOTE(review): mangled identifiers — the pipeline is bound to `__snake_case`
    while the test reads `self.pipe`/`pipeline_output`; verify against upstream.
    """

    def a ( self : str ) -> Union[str, Any]:
        super().setUp()

    def a ( self : Optional[Any] ) -> str:
        # Drop the pipeline and reclaim GPU memory between tests.
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def a ( self : Optional[int] ) -> List[str]:
        # Build a 4-bit text-generation pipeline and check its output is one of
        # the expected completions.
        __snake_case = pipeline(
            'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        __snake_case = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( __lowercase ):
    """Multi-GPU (`device_map='balanced'`) 4-bit inference test.

    NOTE(review): mangled identifiers — results are bound to `__snake_case`
    while assertions read `model_parallel`/`encoded_input`/`output_parallel`.
    """

    def a ( self : Optional[int] ) -> Union[str, Any]:
        super().setUp()

    def a ( self : Optional[int] ) -> List[Any]:
        __snake_case = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='balanced' )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        __snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
        # Second real batch
        __snake_case = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( __lowercase ):
    """Checks that LoRA adapters attached to a quantised model receive gradients.

    NOTE(review): mangled identifiers throughout — assignments go to
    `__snake_case`, `isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)`
    has lost its real arguments (presumably `module, LoRALayer`), and
    `model`/`out` are read but never bound under those names.
    """

    def a ( self : Any ) -> str:
        __snake_case = 'facebook/opt-350m'
        super().setUp()

    def a ( self : int ) -> List[Any]:
        # Adapter training requires a recent-enough bitsandbytes.
        if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
            return

        # Step 1: freeze all parameters
        __snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )

        for param in model.parameters():
            __snake_case = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                __snake_case = param.data.to(torch.floataa )

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE_ ) ):
                __snake_case = LoRALayer(module.q_proj , rank=16 )
                __snake_case = LoRALayer(module.k_proj , rank=16 )
                __snake_case = LoRALayer(module.v_proj , rank=16 )

        # Step 3: dummy batch
        __snake_case = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            __snake_case = model.forward(**SCREAMING_SNAKE_CASE_ )
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(SCREAMING_SNAKE_CASE_ , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class _lowercase ( __lowercase ):
    """Parameterisation subclass (model name + expected metric value).

    NOTE(review): both class attributes were mangled to the same name
    `_SCREAMING_SNAKE_CASE`, so the string is immediately overwritten by the
    float — restore the original attribute names before use.
    """

    _SCREAMING_SNAKE_CASE : Union[str, Any] = "gpt2-xl"
    _SCREAMING_SNAKE_CASE : Optional[int] = 3.3191854854152187
| 56 | 0 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class _SCREAMING_SNAKE_CASE (_UpperCAmelCase ):
    """Mock SageMaker launch configuration for the argument-conversion tests.

    NOTE(review): every field was mangled to the same name `lowerCAmelCase`, so
    each assignment overwrites the previous one and `@dataclass` sees no
    annotated fields; the base class `_UpperCAmelCase` is also undefined
    (presumably `SageMakerConfig`). Restore the real field names from upstream.
    """

    # compute environment / instance settings
    lowerCAmelCase = ComputeEnvironment.AMAZON_SAGEMAKER
    lowerCAmelCase = True
    lowerCAmelCase = """ml.p3.2xlarge"""
    lowerCAmelCase = """accelerate_sagemaker_execution_role"""
    lowerCAmelCase = """hf-sm"""
    lowerCAmelCase = """us-east-1"""
    lowerCAmelCase = 1
    lowerCAmelCase = """accelerate-sagemaker-1"""
    lowerCAmelCase = """1.6"""
    lowerCAmelCase = """4.4"""
    lowerCAmelCase = """train.py"""
    # argv-style script arguments expected to convert cleanly
    lowerCAmelCase = [
        """--model_name_or_path""",
        """bert""",
        """--do_train""",
        """False""",
        """--epochs""",
        """3""",
        """--learning_rate""",
        """5e-5""",
        """--max_steps""",
        """50.5""",
    ]
    # argv-style script arguments expected to fail conversion (bare flags mixed
    # with valued options)
    lowerCAmelCase = [
        """--model_name_or_path""",
        """bert""",
        """--do_train""",
        """--do_test""",
        """False""",
        """--do_predict""",
        """--epochs""",
        """3""",
        """--learning_rate""",
        """5e-5""",
        """--max_steps""",
        """50.5""",
    ]
class _SCREAMING_SNAKE_CASE (unittest.TestCase ):
    """Tests `_convert_nargs_to_dict` over the mock launch config's argv lists.

    NOTE(review): mangled identifiers — the conversion result is bound to
    `__SCREAMING_SNAKE_CASE` but asserted via `converted_args`, and
    `MockLaunchConfig`/`lowercase_` are undefined in this chunk (the expected
    type objects per assertion are lost).
    """

    def __snake_case ( self : Union[str, Any] )->int:
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        __SCREAMING_SNAKE_CASE : Tuple = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args["model_name_or_path"] , lowercase_ )
        assert isinstance(converted_args["do_train"] , lowercase_ )
        assert isinstance(converted_args["epochs"] , lowercase_ )
        assert isinstance(converted_args["learning_rate"] , lowercase_ )
        assert isinstance(converted_args["max_steps"] , lowercase_ )
        # Mixed bare flags and valued options must raise.
        with pytest.raises(lowercase_ ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 708 |
from collections.abc import Generator
from math import sin
def _lowerCAmelCase ( __lowerCamelCase : bytes ):
    """Reverse the byte order of a 32-character bit string in 8-char groups.

    Args:
        __lowerCamelCase: a 32-byte ASCII bit string (one 32-bit word).

    Returns:
        bytes: the four 8-char groups in reversed (little-endian) order.

    Raises:
        ValueError: if the input is not exactly 32 bytes long.
    """
    if len(__lowerCamelCase ) != 32:
        raise ValueError("Input must be of length 32" )

    little_endian = b""
    for i in [3, 2, 1, 0]:
        # Bug fix: the loop body referenced an undefined name `string_aa`;
        # it must index the function argument.
        little_endian += __lowerCamelCase[8 * i : 8 * i + 8]
    return little_endian
def _lowerCAmelCase ( __lowerCamelCase : int ):
    """Format a non-negative int as 8 hex digits with little-endian byte pairs.

    Args:
        __lowerCamelCase: non-negative integer (only the low 32 bits are kept).

    Returns:
        bytes: 8 ASCII hex characters, byte pairs in little-endian order.

    Raises:
        ValueError: if the input is negative.
    """
    if __lowerCamelCase < 0:
        raise ValueError("Input must be non-negative" )

    # Bug fix: the original bound the format result to a mangled placeholder
    # and then read the undefined names `hex_rep`/`little_endian_hex`.
    hex_rep = format(__lowerCamelCase , "08x" )[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
    return little_endian_hex
def _lowerCAmelCase ( __lowerCamelCase : bytes ):
    """MD5 preprocessing: expand a message to a padded ASCII bit string.

    Converts each byte of the message to its 8-bit binary representation,
    appends the mandatory '1' bit, pads with '0' bits to 448 (mod 512), then
    appends the original bit length as a little-endian 64-bit value.

    NOTE(review): depends on the sibling helper ``to_little_endian``; local
    names below were reconstructed from the mangled original.

    Args:
        __lowerCamelCase: the raw message bytes.

    Returns:
        bytes: an ASCII '0'/'1' string whose length is a multiple of 512.
    """
    bit_string = b""
    for char in __lowerCamelCase:
        # Bug fix: the original formatted the whole message instead of each byte.
        bit_string += format(char , "08b" ).encode("utf-8" )
    # The appended length is the message length in bits (RFC 1321 step 2).
    start_len = format(len(bit_string ) , "064b" ).encode("utf-8" )

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def _lowerCAmelCase ( __lowerCamelCase : bytes ):
    """Yield 16-word blocks from a preprocessed MD5 bit string.

    Splits the bit string into 512-char chunks and converts each chunk into
    sixteen 32-bit integers (each word read little-endian via the sibling
    helper ``to_little_endian``).

    Args:
        __lowerCamelCase: ASCII bit string; length must be a multiple of 512.

    Yields:
        list[int]: sixteen 32-bit words per 512-char block.

    Raises:
        ValueError: if the length is not a multiple of 512.
    """
    if len(__lowerCamelCase ) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512" )

    for pos in range(0 , len(__lowerCamelCase ) , 512 ):
        # Bug fix: the original bound the chunk/word list to mangled
        # placeholders and then read the undefined names `block`/`block_words`.
        block = __lowerCamelCase[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def _lowerCAmelCase ( __lowerCamelCase : int ):
    """Return the bitwise NOT of a non-negative integer over 32 bits.

    Args:
        __lowerCamelCase: non-negative integer (< 2**32 for a meaningful result).

    Returns:
        int: the 32-bit one's complement.

    Raises:
        ValueError: if the input is negative.
    """
    if __lowerCamelCase < 0:
        raise ValueError("Input must be non-negative" )

    # Bug fix: the original bound these locals to mangled placeholders and
    # then read the undefined names `i_str`/`new_str`.
    i_str = format(__lowerCamelCase , "032b" )
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def _lowerCAmelCase ( a : int , b : int ):
    """Add two integers modulo 2**32 (MD5 word addition).

    Bug fix: the original signature declared the same parameter name twice,
    which is a SyntaxError in Python; the parameters are restored from the
    names the body actually reads. Positional callers are unaffected.
    """
    return (a + b) % 2**32
def _lowerCAmelCase ( i : int , shift : int ):
    """Left-rotate a 32-bit value by ``shift`` bits.

    Bug fix: the original signature declared the same parameter name twice
    (a SyntaxError); parameter names are restored from the body. Positional
    callers are unaffected.

    Raises:
        ValueError: if either argument is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative" )
    if shift < 0:
        raise ValueError("Shift must be non-negative" )
    # XOR merges the shifted-out high bits back in; the mask keeps 32 bits.
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _lowerCAmelCase ( __lowerCamelCase : bytes ):
    """Compute the MD5 digest of a message.

    Implements the RFC 1321 main loop over the four state words, using the
    sibling helpers ``preprocess``, ``get_block_words``, ``not_aa``,
    ``sum_aa``, ``left_rotate_aa`` and ``reformat_hex``.

    NOTE(review): every local binding in the original was mangled to a
    placeholder name; the names below were reconstructed from the values the
    code reads (``bit_string``, ``shift_amounts``, the a/b/c/d round state).

    Args:
        __lowerCamelCase: the raw message bytes.

    Returns:
        bytes: the 32-character ASCII hex digest.
    """
    bit_string = preprocess(__lowerCamelCase )
    # Per-round additive constants: floor(2**32 * |sin(i + 1)|) (RFC 1321).
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]

    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476

    # Per-round left-rotation amounts (four groups of 16 rounds).
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            # Rotate the four state words; only b absorbs the round result.
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )

    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 447 | 0 |
import pprint
import requests
# Base URL of the zenquotes REST API.
# Bug fixes: the `List[Any]` annotation was evaluated at module level with
# `List` never imported (NameError), and the request helpers below read
# `API_ENDPOINT_URL`, which was never defined — alias it to the mangled name.
UpperCAmelCase_ = "https://zenquotes.io/api"
API_ENDPOINT_URL = UpperCAmelCase_
def UpperCamelCase ( )-> Tuple:
    """Fetch the quote of the day from the zenquotes API as parsed JSON."""
    response = requests.get(API_ENDPOINT_URL + "/today" )
    return response.json()
def UpperCamelCase ( )-> List[str]:
    """Fetch a random quote from the zenquotes API as parsed JSON."""
    response = requests.get(API_ENDPOINT_URL + "/random" )
    return response.json()
if __name__ == "__main__":
    # Bug fix: the original called the undefined name `random_quotes` and then
    # printed the undefined name `response`. `UpperCamelCase` resolves to the
    # random-quote helper (the later definition of that mangled name wins).
    response = UpperCamelCase()
    pprint.pprint(response )
| 491 | '''simple docstring'''
import math
def __lowerCamelCase ( _UpperCamelCase : int ):
    """Return the n-th Proth number (3, 5, 9, 13, 17, 25, ...).

    Builds the Proth sequence block by block: within block ``b`` the values
    are ``2**(b + 1)`` plus earlier entries, and each block has twice as many
    entries as the previous one.

    Bug fix: the original body referenced the undefined names ``number``,
    ``proth_list``, ``proth_index`` and ``increment``; they are restored here
    (``number`` is the function argument).

    Args:
        _UpperCamelCase: 1-based index into the Proth sequence.

    Returns:
        int: the ``_UpperCamelCase``-th Proth number.

    Raises:
        TypeError: if the input is not an int.
        ValueError: if the input is < 1.
    """
    if not isinstance(_UpperCamelCase , int ):
        raise TypeError(f"""Input value of [number={_UpperCamelCase}] must be an integer""" )

    if _UpperCamelCase < 1:
        raise ValueError(f"""Input value of [number={_UpperCamelCase}] must be > 0""" )
    elif _UpperCamelCase == 1:
        return 3
    elif _UpperCamelCase == 2:
        return 5
    else:
        # Number of doubling blocks needed to reach the requested index.
        block_index = int(math.log(_UpperCamelCase // 3 , 2 ) ) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2

        return proth_list[_UpperCamelCase - 1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Print the first Proth numbers; index 0 demonstrates the ValueError path.
    # Bug fix: the original called the undefined name `proth` and printed the
    # undefined name `value`; both are restored here.
    for number in range(11):
        try:
            value = __lowerCamelCase(number)
        except ValueError:
            print(f"""ValueError: there is no {number}th Proth number""")
            continue
        print(f"""The {number}th Proth number: {value}""")
| 390 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
    """Builds tiny FocalNet configs/inputs and runs shape checks for the tests.

    NOTE(review): every parameter of ``__init__`` and the check helpers was
    mangled to the same placeholder (``_UpperCamelCase``), which duplicates
    argument names — a SyntaxError. The intended parameter names survive only
    as the right-hand sides of the ``__init__`` assignments; restore the full
    signatures from the upstream transformers test before running. The same
    mangling binds locals to ``_lowerCamelCase`` while later lines read the
    intended names (``config``, ``pixel_values``, ``model``, ``result``, ...).
    """

    def __init__( self : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Dict=13 , _UpperCamelCase : Optional[Any]=32 , _UpperCamelCase : int=2 , _UpperCamelCase : Optional[Any]=3 , _UpperCamelCase : Union[str, Any]=16 , _UpperCamelCase : List[str]=[32, 64, 128] , _UpperCamelCase : Any=[1, 2, 1] , _UpperCamelCase : Tuple=[2, 2, 4] , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : int=2.0 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : int="gelu" , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : int=True , _UpperCamelCase : Any=0.0_2 , _UpperCamelCase : List[Any]=1E-5 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Any=None , _UpperCamelCase : str=True , _UpperCamelCase : Dict=10 , _UpperCamelCase : Dict=8 , _UpperCamelCase : Any=["stage1", "stage2"] , _UpperCamelCase : Dict=[1, 2] , ) ->str:
        # Store every hyper-parameter of the tiny test model on the instance.
        _lowerCamelCase : List[Any] = parent
        _lowerCamelCase : List[Any] = batch_size
        _lowerCamelCase : Union[str, Any] = image_size
        _lowerCamelCase : Optional[Any] = patch_size
        _lowerCamelCase : int = num_channels
        _lowerCamelCase : Any = embed_dim
        _lowerCamelCase : Union[str, Any] = hidden_sizes
        _lowerCamelCase : str = depths
        _lowerCamelCase : List[Any] = num_heads
        _lowerCamelCase : Union[str, Any] = window_size
        _lowerCamelCase : Optional[Any] = mlp_ratio
        _lowerCamelCase : List[str] = qkv_bias
        _lowerCamelCase : Any = hidden_dropout_prob
        _lowerCamelCase : Tuple = attention_probs_dropout_prob
        _lowerCamelCase : Optional[Any] = drop_path_rate
        _lowerCamelCase : Union[str, Any] = hidden_act
        _lowerCamelCase : str = use_absolute_embeddings
        _lowerCamelCase : List[str] = patch_norm
        _lowerCamelCase : Dict = layer_norm_eps
        _lowerCamelCase : int = initializer_range
        _lowerCamelCase : str = is_training
        _lowerCamelCase : Optional[int] = scope
        _lowerCamelCase : str = use_labels
        _lowerCamelCase : List[Any] = type_sequence_label_size
        _lowerCamelCase : Any = encoder_stride
        _lowerCamelCase : List[Any] = out_features
        _lowerCamelCase : str = out_indices

    def _SCREAMING_SNAKE_CASE ( self : str) ->str:
        """Create random pixel_values (and labels when use_labels) plus a config."""
        _lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        _lowerCamelCase : str = None
        if self.use_labels:
            _lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)

        _lowerCamelCase : Tuple = self.get_config()

        return config, pixel_values, labels

    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
        """Build a FocalNetConfig from the stored hyper-parameters."""
        return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )

    def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : int) ->Dict:
        """Run the base model and check the last_hidden_state shape."""
        _lowerCamelCase : List[Any] = FocalNetModel(config=_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : Optional[Any] = model(_UpperCamelCase)

        _lowerCamelCase : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        _lowerCamelCase : int = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]) ->str:
        """Check backbone feature maps/channels, including out_features=None."""
        _lowerCamelCase : Dict = FocalNetBackbone(config=_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : Any = model(_UpperCamelCase)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        _lowerCamelCase : List[Any] = None
        _lowerCamelCase : int = FocalNetBackbone(config=_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : Tuple = model(_UpperCamelCase)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])

    def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any]) ->str:
        """Check masked-image-modeling reconstruction shape (incl. greyscale)."""
        _lowerCamelCase : Dict = FocalNetForMaskedImageModeling(config=_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : Tuple = model(_UpperCamelCase)
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        _lowerCamelCase : Tuple = 1
        _lowerCamelCase : int = FocalNetForMaskedImageModeling(_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()

        _lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        _lowerCamelCase : List[Any] = model(_UpperCamelCase)
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict) ->Dict:
        """Check classification logits shape (incl. greyscale)."""
        _lowerCamelCase : str = self.type_sequence_label_size
        _lowerCamelCase : Optional[int] = FocalNetForImageClassification(_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()
        _lowerCamelCase : Any = model(_UpperCamelCase , labels=_UpperCamelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        _lowerCamelCase : List[str] = 1
        _lowerCamelCase : int = FocalNetForImageClassification(_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()

        _lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        _lowerCamelCase : List[Any] = model(_UpperCamelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))

    def _SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
        """Split prepare_config_and_inputs into (config, inputs_dict) for common tests."""
        _lowerCamelCase : Any = self.prepare_config_and_inputs()
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = config_and_inputs
        _lowerCamelCase : Optional[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """Common-suite tests for the FocalNet model family.

    NOTE(review): this chunk shows the same machine mangling as the tester
    class above — all five flag attributes share the name ``_snake_case`` (so
    only the last assignment survives), helper parameters are duplicated
    ``_UpperCamelCase`` placeholders, and locals are bound to
    ``_lowerCamelCase`` while later lines read the intended names. Restore
    from the upstream transformers test before running.
    """

    # Model classes exercised by the common test mixins (backbone last, which
    # several loops below deliberately exclude via `[:-1]`).
    _snake_case = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    _snake_case = (
        {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False

    def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
        """Create the model tester and a ConfigTester for FocalNetConfig."""
        _lowerCamelCase : int = FocalNetModelTester(self)
        _lowerCamelCase : str = ConfigTester(self , config_class=_UpperCamelCase , embed_dim=37 , has_text_modality=_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
        """Run the standard config round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
        # Intentionally empty placeholder (common-properties hook).
        return

    def _SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
        """Base-model forward shape check."""
        _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : int) ->Any:
        """Backbone feature-map check."""
        _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
        """Masked-image-modeling head check."""
        _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
        """Image-classification head check."""
        _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase)

    @unittest.skip(reason="""FocalNet does not use inputs_embeds""")
    def _SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
        pass

    @unittest.skip(reason="""FocalNet does not use feedforward chunking""")
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->int:
        pass

    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
        """Input embeddings are modules; output embeddings absent or Linear."""
        _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()

        # Skip the backbone (last class), which has no embeddings API.
        for model_class in self.all_model_classes[:-1]:
            _lowerCamelCase : Tuple = model_class(_UpperCamelCase)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            _lowerCamelCase : Union[str, Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear))

    def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
        """forward() must take pixel_values as its first argument."""
        _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            _lowerCamelCase : List[str] = model_class(_UpperCamelCase)
            _lowerCamelCase : Dict = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()]

            _lowerCamelCase : List[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int) ->str:
        """Shared helper: run a model and verify (reshaped) hidden-state shapes."""
        _lowerCamelCase : Dict = model_class(_UpperCamelCase)
        model.to(_UpperCamelCase)
        model.eval()

        with torch.no_grad():
            _lowerCamelCase : int = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase))

        _lowerCamelCase : Union[str, Any] = outputs.hidden_states

        _lowerCamelCase : Optional[Any] = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths) + 1)
        self.assertEqual(len(_UpperCamelCase) , _UpperCamelCase)

        # FocalNet has a different seq_length
        _lowerCamelCase : List[Any] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        _lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )

        _lowerCamelCase : str = outputs.reshaped_hidden_states
        self.assertEqual(len(_UpperCamelCase) , _UpperCamelCase)

        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = reshaped_hidden_states[0].shape
        _lowerCamelCase : Dict = (
            reshaped_hidden_states[0].view(_UpperCamelCase , _UpperCamelCase , height * width).permute(0 , 2 , 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )

    def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Any:
        """Hidden-states check at the default image size (arg and config paths)."""
        _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        _lowerCamelCase : Dict = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            _lowerCamelCase : List[str] = True
            self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCamelCase : Tuple = True

            self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
        """Hidden-states check with the image padded to a patch multiple."""
        _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : int = 3

        _lowerCamelCase : Tuple = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        _lowerCamelCase : int = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        _lowerCamelCase : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        _lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            _lowerCamelCase : Dict = True
            self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCamelCase : Dict = True
            self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width))

    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]:
        """Hub round-trip for the first pretrained checkpoint."""
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Optional[Any] = FocalNetModel.from_pretrained(_UpperCamelCase)
            self.assertIsNotNone(_UpperCamelCase)

    def _SCREAMING_SNAKE_CASE ( self : Dict) ->int:
        """With zero-init config, non-embedding params must init to 0.0 or 1.0."""
        _lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : List[str] = _config_zero_init(_UpperCamelCase)
        for model_class in self.all_model_classes:
            _lowerCamelCase : Any = model_class(config=_UpperCamelCase)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __snake_case ( unittest.TestCase ):
    """Integration test: pretrained focalnet-tiny logits on the COCO cats image.

    NOTE(review): mangled identifiers — model/processor/image/inputs/outputs
    are all bound to ``_lowerCamelCase`` while later lines read the intended
    names (``model``, ``outputs``, ...).
    """

    @cached_property
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int:
        # Processor is only available when vision extras are installed.
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""") if is_vision_available() else None

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
        _lowerCamelCase : Dict = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""").to(_UpperCamelCase)
        _lowerCamelCase : Dict = self.default_image_processor
        _lowerCamelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        _lowerCamelCase : List[str] = image_processor(images=_UpperCamelCase , return_tensors="""pt""").to(_UpperCamelCase)

        # forward pass
        with torch.no_grad():
            _lowerCamelCase : Optional[int] = model(**_UpperCamelCase)

        # verify the logits
        _lowerCamelCase : List[Any] = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , _UpperCamelCase)
        _lowerCamelCase : Optional[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1]).to(_UpperCamelCase)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4))
        # Class 281 is the expected top prediction (tabby cat in ImageNet).
        self.assertTrue(outputs.logits.argmax(dim=-1).item() , 281)
@require_torch
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
    """Backbone-common test suite wired to FocalNetBackbone.

    NOTE(review): the three class attributes share the mangled name
    ``_snake_case`` (only the last assignment survives) and the tester
    instance is bound to ``_lowerCamelCase``; restore from upstream.
    """

    _snake_case = (FocalNetBackbone,) if is_torch_available() else ()
    _snake_case = FocalNetConfig

    _snake_case = False

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
        _lowerCamelCase : Optional[Any] = FocalNetModelTester(self)
| 15 | import math
def A__ ( __A ):
    """Return True when ``__A`` is a prime number, False otherwise.

    Uses trial division by odd numbers up to sqrt(n).

    Bug fix: the original asserted ``isinstance(__A, __A)`` (a TypeError at
    runtime) and read the undefined name ``number``; both now use the
    function argument.

    Args:
        __A: non-negative integer to test.

    Raises:
        AssertionError: if ``__A`` is not a non-negative int.
    """
    assert isinstance(__A , int ) and (
        __A >= 0
    ), "'number' must been an int and positive"

    if 1 < __A < 4:
        # 2 and 3 are primes
        return True
    elif __A < 2 or not __A % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3 , int(math.sqrt(__A ) + 1 ) , 2 )
    return not any(not __A % i for i in odd_numbers )
def A__ ( value , factor=1 , **kwargs ):
    """Return the smallest prime >= ``factor * value`` that differs from it.

    Walks upward (or downward when ``desc=True`` is passed) until ``is_prime``
    accepts the candidate; if the starting value was already prime, recurses
    from the next integer so the result always moves past the start.

    Bug fixes: the original signature declared ``__A`` three times (a
    SyntaxError) — parameter names are restored from the body's reads — and
    the recursive call targeted the nonexistent name ``next_prime``; it now
    recurses into this function. Positional callers are unaffected.

    NOTE(review): depends on the sibling predicate ``is_prime``.
    """
    first_value_val = factor * value
    value = first_value_val
    while not is_prime(value ):
        # Step direction is controlled by an optional desc=True keyword.
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return A__(value + 1 , **kwargs )
    return value
| 15 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : str = logging.get_logger(__name__)
A__ : Optional[Any] = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration class for Open-Llama models.

    Stores the model hyper-parameters and forwards token ids / embedding
    tying to the base config class.

    NOTE(review): the original ``__init__`` declared every parameter as the
    duplicate placeholder ``__a`` (a SyntaxError); the names and defaults are
    restored here from the attribute assignments in the body and the upstream
    ``OpenLlamaConfig``.
    """

    A__ = '''open-llama'''

    def __init__(
        self ,
        vocab_size=100000 ,
        max_position_embeddings=4096 ,
        hidden_size=11008 ,
        intermediate_size=32 ,
        num_hidden_layers=32 ,
        num_attention_heads="silu" ,
        hidden_act=2048 ,
        initializer_range=0.0_2 ,
        rms_norm_eps=1e-6 ,
        use_cache=True ,
        pad_token_id=0 ,
        bos_token_id=1 ,
        eos_token_id=2 ,
        tie_word_embeddings=False ,
        use_memory_efficient_attention=True ,
        hidden_dropout_prob=0.1 ,
        attention_dropout_prob=0.1 ,
        use_stable_embedding=True ,
        shared_input_output_embedding=True ,
        rope_scaling=None ,
        **kwargs ,
    ) -> Dict:
        """Store hyper-parameters, validate rope_scaling, then init the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled pop key preserves backward compatibility with configs
        # saved under the old (typo'd) name.
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention' , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        # Bug fix: the original called self._rope_scaling_validation(), but the
        # validator below is (mangled) named A_ — call the method that exists.
        self.A_()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def A_ ( self : List[str] ) -> List[str]:
        """Validate ``rope_scaling``: a 2-key dict with type in {linear, dynamic}
        and a float factor > 1. No-op when rope_scaling is None."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 286 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class snake_case__ ( metaclass=DummyObject ):
    """Placeholder that raises an informative error because `note_seq` is absent.

    NOTE(review): the metaclass and method names were reconstructed -- the
    mangled source used an undefined metaclass name and gave both
    classmethods the same identifier; confirm against the upstream dummy
    objects file.
    """

    # `DummyObject` reads `_backends` to build the error message.
    _backends = ['''note_seq''']
    A__ = ['''note_seq''']  # kept for backward compatibility

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['note_seq'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['note_seq'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['note_seq'] )
| 286 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase_ ( ):
    """Simulate a CUDA out-of-memory failure (used by the tests below)."""
    raise RuntimeError("""CUDA out of memory.""" )


# The tests below call this helper by its original, readable name.
raise_fake_out_of_memory = lowerCAmelCase_
class UpperCamelCase ( nn.Module ):
    """Tiny Linear -> BatchNorm1d -> Linear model used by the memory tests."""

    def __init__(self ) -> None:
        super().__init__()
        # Distinct attributes: the mangled source assigned both linears to the
        # same name, and `nn.BatchNormad` does not exist (BatchNorm1d intended).
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linearb = nn.Linear(4 , 5 )

    def A_ (self , x ):
        """Forward pass: (N, 3) -> (N, 5)."""
        return self.linearb(self.batchnorm(self.lineara(x ) ) )


# The CUDA test below instantiates the model under this name; binding it here
# also protects it from the identically-named TestCase class defined next.
ModelForTest = UpperCamelCase
class UpperCamelCase ( unittest.TestCase ):
    """Tests for `find_executable_batch_size` and `release_memory`.

    NOTE(review): the mangled source gave every test method the same name
    (so only the last survived) and referenced undefined locals; method,
    local, and exception names were reconstructed from the assertions
    themselves -- confirm against the upstream accelerate test suite.
    """

    def test_memory_implicit(self ):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        # Each simulated OOM halves the batch size until the call succeeds at 8.
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )

    def test_memory_explicit(self ):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        bs, arga = mock_training_loop_function("""hello""" )
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, """hello"""] )

    def test_start_zero(self ):
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass

        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )

    def test_approach_zero(self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )

    def test_verbose_guard(self ):
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga , argb ):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        # Passing the batch size explicitly is rejected by the decorator.
        with self.assertRaises(TypeError ) as cm:
            mock_training_loop_function(128 , """hello""" , """world""" )
        self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
        self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )

    def test_any_other_error(self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            raise ValueError("""Oops, we had an error!""" )

        with self.assertRaises(ValueError ) as cm:
            mock_training_loop_function()
        self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )

    @require_cuda
    def test_release_memory(self ):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
| 138 | from __future__ import annotations
import pandas as pd
def lowerCAmelCase_ ( arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ):
    """Per-process waiting times under preemptive SJF (shortest remaining time first).

    Simulates the schedule one time unit at a time and returns a list of
    waiting times indexed like `arrival_time` / `burst_time`.
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 9_9999_9999  # sentinel "infinite" remaining time
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            # Nothing runnable yet; just advance the clock.
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 9_9999_9999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


# The script entry point calls this function by its descriptive name.
calculate_waitingtime = lowerCAmelCase_
def lowerCAmelCase_ ( burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ):
    """Turnaround time of each process = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


# The script entry point calls this function by its descriptive name.
calculate_turnaroundtime = lowerCAmelCase_
def lowerCAmelCase_ ( waiting_time: list[int] , turn_around_time: list[int] , no_of_processes: int ):
    """Print the average waiting and turnaround times for the schedule."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
    print("""Average turn around time =""" , total_turn_around_time / no_of_processes )


# The script entry point calls this function by its descriptive name.
calculate_average_times = lowerCAmelCase_
if __name__ == "__main__":
    # Interactive driver: read the process table, run SJF, print a summary.
    # NOTE(review): variable names reconstructed -- the mangled source
    # assigned every value to one throwaway name while reading the real names.
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
| 138 | 1 |
"""simple docstring"""
import random
class lowerCAmelCase :
    """One-time-pad-like cipher.

    Each character c gets a fresh random key k in [1, 300] and is encoded as
    (c + k) * k; decoding inverts that exactly, so the round trip is lossless.
    """

    @staticmethod
    def encrypt( text ) -> tuple[list[int], list[int]]:
        """Encrypt *text*; return (cipher, key) lists of equal length."""
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for code in plain:
            k = random.randint(1 , 300 )
            # (code + k) * k == code*k + k**2, inverted below.
            c = (code + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key

    @staticmethod
    def decrypt( cipher , key ) -> str:
        """Invert `encrypt`: recover the plaintext from (cipher, key)."""
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )


# The demo below instantiates the class under this name.
Onepad = lowerCAmelCase
if __name__ == "__main__":
    # Round-trip demo: encrypt, show (cipher, key), then decrypt back.
    c, k = Onepad().encrypt("""Hello""")
    print(c, k)
    print(Onepad().decrypt(c, k))
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
# Resize/ToTensor/Normalize pipeline used by `preprocess` below, which looks
# it up under the name `trans`; the mangled source only bound `__lowercase`.
trans = transforms.Compose(
    [
        transforms.Resize((2_5_6, 2_5_6)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
# Backward-compatible binding for the previous module-level name.
__lowercase = trans
def lowerCamelCase_ ( image ):
    """Convert a PIL image (or list of PIL images) into a normalized
    (N, C, H, W) float tensor; tensors are passed through unchanged."""
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        # Wrap a single image so the comprehension below sees a list.
        image = [image]

    # `trans` is the module-level Resize/ToTensor/Normalize pipeline.
    image = [trans(img.convert('''RGB''' ) ) for img in image]
    image = torch.stack(image )
    return image


# The pipeline below calls this helper by its descriptive name.
preprocess = lowerCamelCase_
class lowerCAmelCase ( DiffusionPipeline ):
    """DDIM image-variation pipeline.

    Noises an input image up to an intermediate timestep (controlled by
    `strength`) and then denoises it back with a DDIM scheduler.

    NOTE(review): method and parameter names were reconstructed -- the
    mangled source gave every method one identifier and repeated one
    parameter name (not valid Python); confirm against the upstream
    community pipeline.
    """

    def __init__( self , unet , scheduler ) -> None:
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=unet , scheduler=scheduler )

    def check_inputs( self , strength ) -> None:
        """Reject strengths outside [0, 1]."""
        if strength < 0 or strength > 1:
            raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )

    def get_timesteps( self , num_inference_steps , strength , device ):
        """Return the timestep tail to run and its length, given `strength`."""
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )

        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        """Noise `image` to `timestep` and return the resulting latents."""
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )

        image = image.to(device=device , dtype=dtype )

        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )

        init_latents = image
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )

        # get latents
        print('''add noise to latents at timestep''' , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__( self , image = None , strength = 0.8 , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 50 , use_clipped_model_output = None , output_type = "pil" , return_dict = True , ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Validate inputs
        self.check_inputs(strength )

        # 2. Preprocess image
        image = preprocess(image )

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )

        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image )
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__ ( BaseOutput ):
    """Output of the semantic Stable Diffusion pipeline.

    NOTE(review): field names were reconstructed -- the mangled source used a
    single (name-mangled) identifier for both fields; confirm against the
    upstream `SemanticStableDiffusionPipelineOutput`.
    """

    # Generated images, as PIL images or a single numpy batch array.
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Per-image NSFW flags (None when the safety checker is disabled).
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 359 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _snake_case ( bpayload: bytes , sampling_rate: int ):
    """Decode an audio byte payload to mono float32 PCM via ffmpeg.

    Raises ValueError if ffmpeg is not installed or produced no samples.
    """
    ar = F"{sampling_rate}"
    ac = """1"""
    format_for_conversion = """f32le"""
    ffmpeg_command = [
        """ffmpeg""",
        """-i""",
        """pipe:0""",
        """-ac""",
        ac,
        """-ar""",
        ar,
        """-f""",
        format_for_conversion,
        """-hide_banner""",
        """-loglevel""",
        """quiet""",
        """pipe:1""",
    ]

    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
    out_bytes = output_stream[0]
    # f32le output is little-endian float32 (the mangled source had np.floataa).
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError("""Malformed soundfile""" )
    return audio


# Callers below use the conventional name.
ffmpeg_read = _snake_case
def _snake_case ( sampling_rate: int , chunk_length_s: float , format_for_conversion: str = "f32le" , ):
    """Yield raw microphone audio chunks captured through ffmpeg.

    Each yielded item is `chunk_length_s` seconds of mono audio encoded as
    `format_for_conversion` ("s16le" or "f32le").
    """
    ar = F"{sampling_rate}"
    ac = """1"""
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )

    # Pick the platform's ffmpeg capture backend and default device.
    system = platform.system()
    if system == "Linux":
        format_ = """alsa"""
        input_ = """default"""
    elif system == "Darwin":
        format_ = """avfoundation"""
        input_ = """:0"""
    elif system == "Windows":
        format_ = """dshow"""
        input_ = """default"""

    ffmpeg_command = [
        """ffmpeg""",
        """-f""",
        format_,
        """-i""",
        input_,
        """-ac""",
        ac,
        """-ar""",
        ar,
        """-f""",
        format_for_conversion,
        """-fflags""",
        """nobuffer""",
        """-hide_banner""",
        """-loglevel""",
        """quiet""",
        """pipe:1""",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
    for item in iterator:
        yield item


# Callers below use the conventional name.
ffmpeg_microphone = _snake_case
def _snake_case ( sampling_rate: int , chunk_length_s: float , stream_chunk_s: Optional[int] = None , stride_length_s: Optional[Union[Tuple[float, float], float]] = None , format_for_conversion: str = "f32le" , ):
    """Stream microphone audio as overlapping numpy chunks.

    Yields dicts with "raw" (numpy audio), "stride" (left/right overlap in
    samples), "sampling_rate", and "partial" for incomplete chunks.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )

    if stride_length_s is None:
        # Default overlap: one sixth of a chunk on each side.
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item["""raw"""] = np.frombuffer(item["""raw"""] , dtype=dtype )
        item["""stride"""] = (
            item["""stride"""][0] // size_of_sample,
            item["""stride"""][1] // size_of_sample,
        )
        item["""sampling_rate"""] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


# Callers below use the conventional name.
ffmpeg_microphone_live = _snake_case
def _snake_case ( iterator , chunk_len: int , stride: Tuple[int, int] , stream: bool = False ):
    """Re-chunk a byte iterator into `chunk_len`-sized pieces with overlap.

    `stride` is the (left, right) overlap in bytes carried between chunks.
    With `stream=True`, not-yet-complete chunks are also yielded early with
    "partial": True.
    """
    acc = B""""""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            F"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            # Not enough data for a full chunk yet; emit a partial one.
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"""raw""": acc[:chunk_len], """stride""": stride}
                if stream:
                    item["""partial"""] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {"""raw""": acc, """stride""": (_stride_left, 0)}
        if stream:
            item["""partial"""] = False
        yield item


# Callers below use the conventional name.
chunk_bytes_iter = _snake_case
def _snake_case ( UpperCamelCase : int , UpperCamelCase : int ):
UpperCAmelCase : List[Any] = 2**24 # 16Mo
try:
with subprocess.Popen(UpperCamelCase , stdout=subprocess.PIPE , bufsize=UpperCamelCase ) as ffmpeg_process:
while True:
UpperCAmelCase : Optional[Any] = ffmpeg_process.stdout.read(UpperCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 359 | 1 |
def lowerCAmelCase_ ( lowercase: str ) -> str:
    """Return the input with ASCII a-z characters upper-cased.

    >>> lowerCAmelCase_("hello world")
    'HELLO WORLD'
    """
    # ord(c) - 32 maps 'a'..'z' onto 'A'..'Z'; everything else passes through.
    return "".join(chr(ord(char ) - 32 ) if '''a''' <= char <= '''z''' else char for char in lowercase )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_ ( lowercase_ : int ):
    """Return True when the digits of the number are exactly 1-9, once each."""
    digits = str(lowercase_ )
    return len(digits ) == 9 and set(digits ) == set('''123456789''' )


# The solver below uses the descriptive name.
is_9_pandigital = lowerCAmelCase_
def lowerCAmelCase_ ( ):
    """Largest 1-9 pandigital number expressible in the tested forms.

    Checks candidates of the form 100002 * n (n = 9999..5000) and then
    1002003 * n (n = 333..100), returning the first pandigital hit, or None.
    """

    def _is_9_pandigital(candidate: int ) -> bool:
        # Pandigital check, kept local so this function is self-contained.
        digits = str(candidate )
        return len(digits ) == 9 and set(digits ) == set('''123456789''' )

    for base_num in range(9999 , 4999 , -1 ):
        candidate = 10_0002 * base_num
        if _is_9_pandigital(candidate ):
            return candidate

    for base_num in range(333 , 99 , -1 ):
        candidate = 100_2003 * base_num
        if _is_9_pandigital(candidate ):
            return candidate

    return None


# The script entry point calls this function by its descriptive name.
solution = lowerCAmelCase_
if __name__ == "__main__":
    # Print the solver's answer when run as a script.
    # NOTE(review): `solution` is not bound at module level in this file as
    # written -- the solver above is defined under a mangled name; confirm
    # the intended binding.
    print(f'{solution() = }')
| 674 | 0 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__magic_name__ = '''\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'''
__magic_name__ = '''\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. 
Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, 
references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'''
__magic_name__ = '''\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
    """Recall metric backed by `sklearn.metrics.recall_score`.

    NOTE(review): both methods below were mangled to one identifier; they
    were renamed to the `_info`/`_compute` hooks `datasets.Metric` calls.
    """

    def _info( self ):
        # Multilabel configs take sequences of labels; everything else takes ints.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('int32' ) ),
                    'references': datasets.Sequence(datasets.Value('int32' ) ),
                }
                if self.config_name == 'multilabel'
                else {
                    'predictions': datasets.Value('int32' ),
                    'references': datasets.Value('int32' ),
                } ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , )

    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        """Compute recall; returns {"recall": float or per-class array}."""
        # sklearn expects y_true (references) first, then y_pred (predictions).
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
# Function to print upper half of diamond (pyramid)
def UpperCAmelCase__( n ):
    """Print the upper half of the diamond: rows of 1..n left-padded stars."""
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(' ' , end='' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('* ' , end='' )
        print()


# `pretty_print` below calls this half by its descriptive name.
floyd = UpperCAmelCase__
def UpperCAmelCase__( n ):
    """Print the lower half of the diamond: rows of n..1 stars, then padding."""
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print('* ' , end='' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(' ' , end='' )


# `pretty_print` below calls this half by its descriptive name.
reverse_floyd = UpperCAmelCase__
def UpperCAmelCase__( n ):
    """Print the full diamond of size `n` (upper half then lower half)."""
    if n <= 0:
        print(' ... .... nothing printing :(' )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half


# The interactive driver below calls this by its descriptive name.
pretty_print = UpperCAmelCase__
if __name__ == "__main__":
    # Interactive demo: repeatedly read a size and print the diamond.
    print(r'''| /\ | |- | |- |--| |\ /| |-''')
    print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')

    K = 1
    while K:
        user_number = int(input('''enter the number and , and see the magic : '''))
        print()
        pretty_print(user_number)
        K = int(input('''press 0 to exit... and 1 to continue...'''))
    print('''Good Bye...''')
| 679 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
# Module-level logger; the conversion helpers below log through `logger`.
_a = logging.get_logger(__name__)
logger = _a
# fairseq -> transformers weight-name mapping ("*" marks the layer index).
_a = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
# The conversion helpers below read this table as `MAPPING`.
MAPPING = _a
# Weights that live at the top level of the HF model (not under the encoder).
_a = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
# Readable module-level name used by the conversion logic.
TOP_LEVEL_KEYS = _a
def SCREAMING_SNAKE_CASE ( hf_pointer , key , value , full_name , weight_type ) -> None:
    """Copy `value` into the attribute of `hf_pointer` addressed by the
    dotted `key`, optionally into its `weight_type` sub-tensor.

    NOTE(review): argument names/order were reconstructed from the body (the
    mangled signature repeated one identifier, which is not valid Python);
    confirm against the upstream conversion script.
    """
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )


# The loader below calls this helper by its descriptive name.
set_recursively = SCREAMING_SNAKE_CASE
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """
    Copy every weight of a fairseq wav2vec2 encoder into `hf_model`.

    Renamed from the generated `SCREAMING_SNAKE_CASE` to match the call site
    (`recursively_load_weights_wavaveca(model.encoder, ...)`), with the lost
    local bindings restored.

    Returns:
        The fairseq encoder->decoder projection module (``fairseq_model.proj``)
        if present, else ``None``.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        elif name.split(""".""")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Layer index sits just before the matched key fragment.
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}" )
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """
    Copy one fairseq conv-feature-extractor tensor into the matching HF
    ``feature_extractor.conv_layers`` slot, or record it as unused.

    Renamed from the generated `SCREAMING_SNAKE_CASE` to match the call site
    (`load_conv_layer(...)`), with distinct parameter names (the generated
    version reused one name for all five, a SyntaxError) and the lost in-place
    assignment targets restored.

    Args:
        full_name: full fairseq parameter name (``...conv_layers.<id>.<type>...``).
        value: the tensor to copy.
        feature_extractor: HF conv feature extractor module.
        unused_weights: list collecting names that could not be mapped.
        use_group_norm: True when the HF config uses group norm (only layer 0
            then carries a layer norm).
    """
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        # type 0 -> the convolution itself.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2 -> the (group/layer) norm; only present for every layer without
        # group norm, or for layer 0 with group norm.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """
    Build a bias-free ``nn.Linear`` whose weight shares the data of the given
    embedding (standard HF trick for tying an output projection to an
    embedding matrix).

    Restores the lost shape unpack, the ``bias=False`` flag (the generated code
    passed the embedding itself as ``bias``) and the lost weight-copy target.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """
    Build a HF vocab mapping from a fairseq dict file.

    Renamed from the generated `SCREAMING_SNAKE_CASE` to match the call site
    (`create_vocab_dict(dict_path)`), and the lost ``num_words`` binding
    (read at the ``zip`` below but never assigned) restored.

    Args:
        dict_path: path to a fairseq dictionary; each line is ``<word> <count>``.

    Returns:
        dict mapping token -> id, with the four special tokens occupying 0-3
        and dictionary words starting at 4 in file order.
    """
    with open(dict_path, """r""", encoding="""utf-8""") as f:
        lines = f.readlines()
        words = [line.split(""" """)[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Convert a fairseq wav2vec2+decoder checkpoint into a HF
    ``SpeechEncoderDecoderModel`` and save model, config, tokenizer, vocab and
    feature extractor to `pytorch_dump_folder_path`.

    Renamed from the generated `SCREAMING_SNAKE_CASE` to match the call site;
    parameter names are fixed by the keyword arguments used by the CLI caller.
    NOTE(review): the generator dropped most assignment targets in this body;
    they are restored here from the surrounding reads (``missing_keys`` /
    ``unexpected_keys``, ``projection_layer``, ``hf_wavavec`` ...). The boolean
    flags (``do_stable_layer_norm``, ``do_normalize``, ``return_attention_mask``,
    ``strict``) were also lost and are restored to the conventional values —
    confirm against the original conversion script.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("""embed_out""")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, """vocab.json"""), """w""") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, """vocab.json"""))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point. The generated code bound the parser and the parsed args
    # to the throwaway name `_a` while the lines below read `parser` / `args`;
    # the proper names are restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument(
        '--encoder_config_path',
        default='facebook/wav2vec2-large-lv60',
        type=str,
        help='Path to hf encoder wav2vec2 checkpoint config',
    )
    parser.add_argument(
        '--decoder_config_path',
        default='facebook/s2t-small-mustc-en-fr-st',
        type=str,
        help='Path to hf decoder s2t checkpoint config',
    )
    parser.add_argument('--vocab_size', default=10_224, type=int, help='Vocab size of decoder')
    parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 213 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    """
    Tests for ``BlenderbotSmallTokenizer``.

    NOTE(review): the generated code used the undefined base `lowerCAmelCase`
    (the imported ``TokenizerTesterMixin`` is restored here), duplicated the
    class-attribute name, dropped the ``self.*`` assignment targets in
    ``setUp`` and renamed every method to `_A` (so unittest never collected
    the tests). Attribute and method names below follow the
    TokenizerTesterMixin / unittest conventions — confirm against the
    original transformers test file.
    """

    # Hooks read by TokenizerTesterMixin.
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny vocab/merges pair into the mixin's temp directory."""
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Mixin hook: build a tokenizer from the temp files."""
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Mixin hook: round-trip sample text."""
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        # padding/truncation flags were lost by the generator; restored to the
        # conventional False/True — TODO confirm.
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 62 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1, string2):
    """
    Compare two equal-length binary/implicant strings.

    Returns the merged string with '_' at the (single) differing position, or
    False when the strings differ in more than one position.

    Renamed from the generated `a` to match the call site; the generated body
    compared ``lista[i]`` against itself and dropped the ``lista[i] = '_'``
    target — both restored.
    """
    lista = list(string1)
    listb = list(string2)
    count = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(lista)
def check(binary):
    """
    Iteratively combine implicants until no further merge is possible and
    return the surviving (prime) implicants.

    Renamed from the generated `a` to match the call site; the lost targets
    ``checka[i]/checka[j] = '*'`` and the loop update ``binary = list(set(temp))``
    (without which the while-loop never terminates) are restored.

    NOTE(review): the ``k is False`` branch appending the literal 'X' looks
    suspect (one would expect the merged string ``k`` on the success branch),
    but it is kept verbatim — confirm against the original algorithm source.
    """
    pi = []
    while True:
        checka = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    checka[i] = '*'
                    checka[j] = '*'
                    temp.append('X')
        # Implicants never marked combined are prime.
        for i in range(len(binary)):
            if checka[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable, minterms):
    """
    Render each minterm as a `no_of_variable`-digit binary string
    (least-significant digit extracted first, prepended).

    Renamed from the generated `a` to match the call site
    (`decimal_to_binary(no_of_variable, minterms)`); the lost ``temp`` and
    ``string`` bindings are restored.
    """
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1, string2, count):
    """
    True when `string1` and `string2` differ in exactly `count` positions
    (used to decide whether a prime implicant covers a minterm).

    Renamed from the generated `a` to match the call site; the generated body
    compared ``lista[i]`` against itself — restored to compare the two lists.
    """
    lista = list(string1)
    listb = list(string2)
    count_n = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection(chart, prime_implicants):
    """
    Select the essential prime implicants from a coverage `chart`
    (rows = prime implicants, columns = minterms, 1 = covers).

    Renamed from the generated `a` to match the call site; the lost in-place
    targets ``select[rem] = 1`` and ``chart[..][..] = 0`` are restored (the
    generated code assigned them to throwaway locals, leaving the chart
    untouched and the greedy phase looping on stale counts).
    """
    temp = []
    select = [0] * len(chart)
    # Phase 1: a column covered by exactly one row makes that row essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take essential rows and zero out every column they cover.
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Phase 2: greedily take the row covering the most remaining columns.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants, binary):
    """
    Build the coverage chart: ``chart[i][j] == 1`` iff prime implicant `i`
    covers minterm `j` (they differ in exactly as many positions as the
    implicant has '_' wildcards).

    Renamed from the generated `a` to match the call site; the lost target
    ``chart[i][j] = 1`` is restored.
    """
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main():
    """
    Interactive driver: read variable count and minterms from stdin, then
    print the prime and essential prime implicants.

    Renamed from the generated `a` to match the `main()` call in the
    ``__main__`` guard; throwaway locals restored to meaningful names.
    """
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        float(x)
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n').split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)
# Script entry point: run the module doctests, then the interactive solver.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 717 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
# Module logger; the generated code bound this to the throwaway name `A`
# while the pipeline below calls `logger.warning(...)` — restored to `logger`.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings( ModelMixin , ConfigMixin ):
    """
    Holds the (optionally learnable) embeddings used in place of empty-prompt
    CLIP embeddings for classifier-free guidance.

    Renamed from the generated `SCREAMING_SNAKE_CASE` (the pipeline class below
    carried the same generated name and shadowed this one); bases restored to
    the imported ``ModelMixin`` / ``ConfigMixin``, duplicate parameter names
    and the lost ``self.learnable`` / ``self.embeddings`` targets fixed.
    """

    @register_to_config
    def __init__(self, learnable, hidden_size=None, length=None):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class SCREAMING_SNAKE_CASE ( DiffusionPipeline ):
    """
    Pipeline for text-to-image generation with VQ-Diffusion.

    Components (registered in ``__init__``): vqvae, text_encoder, tokenizer,
    transformer, scheduler, learned_classifier_free_sampling_embeddings.

    NOTE(review): the generated code named both helper methods
    `__lowerCAmelCase` (the second shadowed the first) while the body calls
    ``self._encode_prompt`` and ``self.truncate`` — those names are restored
    here, along with distinct parameter names (the generated signatures reused
    one name, a SyntaxError) and the lost local bindings. The class name is
    kept as generated to preserve the module interface.
    """

    def __init__(self, vqvae, text_encoder, tokenizer, transformer, scheduler, learned_classifier_free_sampling_embeddings):
        """Register the model components on the pipeline."""
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """
        Encode `prompt` with CLIP, normalize, and (optionally) prepend the
        unconditional embeddings for classifier-free guidance.

        Returns a tensor of shape (batch * num_images_per_prompt, seq, dim),
        doubled along the batch dimension when guidance is enabled.
        """
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [''] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt', )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(self, prompt, num_inference_steps=100, guidance_scale=5.0, truncation_rate=1.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, ):
        """
        Run the full VQ-Diffusion sampling loop for `prompt` and decode the
        resulting latent codes through the VQ-VAE.

        Raises:
            ValueError: for a non-str/list prompt, a bad `callback_steps`, or
                user-supplied `latents` of the wrong shape/range.
        """
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""" )

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}.""" )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # Every pixel starts as the "masked" embedding index.
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
                    f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        # Decode the discrete codes back to image space through the VQ-VAE.
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0, truncation_rate):
        """
        Zero out (set to log(0) = -inf) the lowest-probability classes of each
        distribution so that the kept classes' cumulative probability stays
        below `truncation_rate`; the single most likely class is always kept.
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # Undo the sort so the mask lines up with the original class order.
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
| 46 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """
    Build the (timm_name, hf_name) rename pairs for a ViT-hybrid checkpoint.

    Renamed from the generated `_a` to match the call site
    (`create_rename_keys(config, base_model)`); the lost ``rename_keys = []``
    binding and the duplicate parameter names are fixed. The repetitive
    per-block conv/norm appends are folded into a loop (same pairs, same order).

    Args:
        config: ViT-hybrid config with ``backbone_config.depths`` and
            ``num_hidden_layers``.
        base_model: when True, emit keys for the bare backbone (no "vit."
            prefix, pooler instead of classification head).
    """
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(('cls_token', 'vit.embeddings.cls_token'))
    rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings'))
    rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'))
    rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'))
    # backbone
    rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'))
    rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'))
    rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'))
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            # conv1/norm1 .. conv3/norm3 of each bottleneck block.
            for n in (1, 2, 3):
                rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv{n}.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv{n}.weight'''))
                rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm{n}.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm{n}.weight'''))
                rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm{n}.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm{n}.bias'''))
        rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight'''))
        rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight'''))
        rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias'''))
    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight'''))
        rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias'''))
        rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight'''))
        rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias'''))
        rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight'''))
        rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias'''))
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('norm.weight', 'layernorm.weight'),
                ('norm.bias', 'layernorm.bias'),
                ('pre_logits.fc.weight', 'pooler.dense.weight'),
                ('pre_logits.fc.bias', 'pooler.dense.bias'),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('norm.weight', 'vit.layernorm.weight'),
                ('norm.bias', 'vit.layernorm.bias'),
                ('head.weight', 'classifier.weight'),
                ('head.bias', 'classifier.bias'),
            ])
    # fmt: on
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """
    Split each timm fused qkv projection into separate HF query/key/value
    entries (in that order), mutating `state_dict` in place.

    Renamed from the generated `_a` to match the call site
    (`read_in_q_k_v(state_dict, config, base_model)`). The generated code
    dropped the ``state_dict[...] =`` targets; they are restored here using
    the standard HF ViT key layout
    (``{prefix}encoder.layer.{i}.attention.attention.{query,key,value}``) —
    confirm against the original conversion script.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """
    Drop timm's classification-head weights from `state_dict` in place
    (trailing underscore: mutating helper).

    Renamed from the generated `_a` to match the call site; the broken
    ``pop(lowercase__, lowercase__)`` call is restored to ``pop(k, None)``
    so missing keys are tolerated.
    """
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """
    Move ``dct[old]`` to ``dct[new]`` in place.

    Renamed from the generated `_a` to match the call site
    (`rename_key(state_dict, src, dest)`); the lost ``dct[new] = val``
    target is restored (the generated code popped the value and dropped it).
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """
    Download the standard COCO test image used to sanity-check conversions.

    Renamed from the generated `_a` (consistent with the other restored helper
    names); the zero-argument signature referenced the undefined name
    `lowercase__` as the ``stream`` flag — restored to ``stream=True``.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a timm ViT-hybrid checkpoint to the HF format and verify it.

    Args:
        vit_name: name of the hybrid ViT timm model to convert.
        pytorch_dump_folder_path: where to save the converted model/processor
            (skipped when None).
        push_to_hub: if True, also push model and processor to the HF hub.
    """
    # Define default ViT hybrid configuration (BiT backbone + ViT encoder).
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # Load original model from timm.
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # Load state_dict of original model, remove and rename some keys.
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # Attach human-readable ImageNet-1k labels to the config.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # Load HuggingFace model (bare backbone for in21k checkpoints, else classifier).
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Create an image processor mirroring the timm preprocessing pipeline.
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # Verify pixel values: HF preprocessing must match timm's exactly.
    assert torch.allclose(timm_pixel_values, pixel_values)

    # Verify logits against the original model.
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 85 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
    """Unit tests for BlenderbotSmallTokenizer (tokenization, ids, decoding).

    NOTE(review): names in this class look machine-mangled — the base class
    `lowercase_` is undefined here (presumably TokenizerTesterMixin, imported
    above), the two `__snake_case` attributes shadow each other (presumably
    `tokenizer_class` and `test_rust_tokenizer`), and every method is named
    `lowercase__` so later defs shadow earlier ones. Confirm against upstream.
    """

    __snake_case = BlenderbotSmallTokenizer
    __snake_case = False

    def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
        """Write a tiny vocab/merges fixture to the temp dir for the tests below."""
        super().setUp()
        # Minimal vocabulary covering the "adapt act apte" sample used in tests.
        A__ : Union[str, Any] =["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
        # NOTE(review): `lowerCAmelCase_` is unbound here; presumably the vocab
        # list above was meant (token -> index map) — confirm.
        A__ : Dict =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
        A__ : List[str] =["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
        A__ : Optional[Any] ={"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}
        A__ : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        A__ : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        # Persist the fixture files the tokenizer loads in the tests.
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(lowerCAmelCase_ ) )

    def lowercase__ ( self : List[Any] , **lowerCAmelCase_ : int ) -> int:
        """Build a tokenizer from the temp dir, merging in the special-token map."""
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )

    def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : List[str] ) -> Optional[Any]:
        """Return an (input_text, expected_output_text) pair for round-trip tests."""
        A__ : List[Any] ="""adapt act apte"""
        A__ : Any ="""adapt act apte"""
        return input_text, output_text

    def lowercase__ ( self : Optional[int] ) -> Any:
        """Check tokenization and token->id conversion on the fixture vocab."""
        A__ : Optional[Any] =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        A__ : List[str] ="""adapt act apte"""
        A__ : Union[str, Any] =["""adapt""", """act""", """ap@@""", """te"""]
        A__ : List[str] =tokenizer.tokenize(lowerCAmelCase_ )
        self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
        # Surround with bos/eos and verify the resulting id sequence.
        A__ : Tuple =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        A__ : str =[0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )

    def lowercase__ ( self : Optional[Any] ) -> Dict:
        """Integration test against the pretrained facebook/blenderbot-90M checkpoint."""
        A__ : Union[str, Any] =BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
        assert tok("""sam""" ).input_ids == [13_84]
        A__ : str ="""I am a small frog."""
        A__ : Dict =tok([src_text] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ )["""input_ids"""]
        A__ : int =tok.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )[0]
        # Decoding lower-cases and re-spaces punctuation, so round-trip is lossy.
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def lowercase__ ( self : Optional[Any] ) -> Tuple:
        """A sentence's trailing '.' must encode to the same id as a lone '.'."""
        A__ : str =BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
        A__ : Dict ="""I am a small frog ."""
        A__ : Union[str, Any] ="""."""
        A__ : Optional[int] =tok(lowerCAmelCase_ )["""input_ids"""]
        A__ : List[str] =tok(lowerCAmelCase_ )["""input_ids"""]
        assert encoded[-1] == encoded_dot[0]
| 215 | 0 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger.
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)

# NOTE(review): the repeated `__magic_name__` rebindings below shadow one another;
# they presumably were VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES (the class below reads those names) —
# confirm against upstream.

# Name of the SentencePiece model file expected in a checkpoint directory.
__magic_name__ : int = {'vocab_file': 'spiece.model'}

# Map of checkpoint name -> hosted SentencePiece model URL.
__magic_name__ : int = {
    'vocab_file': {
        'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
        'google/bigbird-roberta-large': (
            'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
        ),
        'google/bigbird-base-trivia-itc': (
            'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
        ),
    }
}

# Maximum input length (positional embeddings) per checkpoint.
__magic_name__ : str = {
    'google/bigbird-roberta-base': 4_0_9_6,
    'google/bigbird-roberta-large': 4_0_9_6,
    'google/bigbird-base-trivia-itc': 4_0_9_6,
}
class lowerCamelCase ( __snake_case ):
    """SentencePiece-based BigBird tokenizer.

    NOTE(review): identifiers in this class look machine-mangled — the class
    attributes all rebind `lowerCAmelCase_` (shadowing each other), every
    positional parameter is named `__UpperCamelCase` (duplicate-argument
    SyntaxError as written), locals rebind `A_`, and most methods share the
    name `lowercase_`. The comments below describe the apparent intent;
    confirm against the upstream BigBird tokenizer before relying on them.
    """

    # Presumably: vocab_files_names, pretrained_vocab_files_map,
    # max_model_input_sizes, model_input_names, prefix_tokens — confirm.
    lowerCAmelCase_ = VOCAB_FILES_NAMES
    lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
    lowerCAmelCase_ = []

    def __init__( self , __UpperCamelCase , __UpperCamelCase="<unk>" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<pad>" , __UpperCamelCase="[SEP]" , __UpperCamelCase="[MASK]" , __UpperCamelCase="[CLS]" , __UpperCamelCase = None , **__UpperCamelCase , ):
        # Wrap plain-string special tokens in AddedToken so strip behaviour is explicit.
        A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
        A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
        A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
        A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
        A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
        A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        A_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
        A_ = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sep_token=__UpperCamelCase , mask_token=__UpperCamelCase , cls_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
        A_ = vocab_file
        # Load the SentencePiece model backing this tokenizer.
        A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__UpperCamelCase )

    @property
    def lowercase_ ( self ):
        # Vocabulary size as reported by the SentencePiece model.
        return self.sp_model.get_piece_size()

    def lowercase_ ( self ):
        # Build a token -> id map including tokens added after training.
        A_ = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        # The SentencePiece processor is not picklable: drop it, reload on unpickle.
        A_ = self.__dict__.copy()
        A_ = None
        return state

    def __setstate__( self , __UpperCamelCase ):
        A_ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            A_ = {}
        # Recreate the SentencePiece processor dropped in __getstate__.
        A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowercase_ ( self , __UpperCamelCase ):
        # Tokenize raw text into SentencePiece pieces.
        return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )

    def lowercase_ ( self , __UpperCamelCase ):
        # Convert a token (piece) to its integer id.
        return self.sp_model.piece_to_id(__UpperCamelCase )

    def lowercase_ ( self , __UpperCamelCase ):
        # Convert an integer id back to its token (piece).
        A_ = self.sp_model.IdToPiece(__UpperCamelCase )
        return token

    def lowercase_ ( self , __UpperCamelCase ):
        """Join tokens back into one string: decode non-special runs with the
        SentencePiece model and splice special tokens in verbatim."""
        A_ = []
        A_ = ""
        A_ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__UpperCamelCase ) + token
                A_ = True
                A_ = []
            else:
                current_sub_tokens.append(__UpperCamelCase )
                A_ = False
        out_string += self.sp_model.decode(__UpperCamelCase )
        return out_string.strip()

    def lowercase_ ( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = True , **__UpperCamelCase , ):
        """Decode ids to text, handling added tokens separately and mimicking the
        Rust tokenizer's spacing rules around [MASK]/[SEP]."""
        A_ = kwargs.pop("use_source_tokenizer" , __UpperCamelCase )
        A_ = self.convert_ids_to_tokens(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        A_ = []
        A_ = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                # Flush the pending sub-text before emitting the added token verbatim.
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(__UpperCamelCase ) )
                A_ = []
                sub_texts.append(__UpperCamelCase )
            else:
                current_sub_text.append(__UpperCamelCase )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(__UpperCamelCase ) )
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            A_ = re.sub(r" (\[(MASK|SEP)\])" , r"\1" , " ".join(__UpperCamelCase ) )
        else:
            A_ = "".join(__UpperCamelCase )
        A_ = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            A_ = self.clean_up_tokenization(__UpperCamelCase )
            return clean_text
        else:
            return text

    def lowercase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
        """Save the SentencePiece vocabulary into ``save_directory``: copy the
        original model file, or serialize the in-memory model if it is gone.
        Returns a 1-tuple with the written path."""
        if not os.path.isdir(__UpperCamelCase ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        A_ = os.path.join(
            __UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __UpperCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__UpperCamelCase , "wb" ) as fi:
                A_ = self.sp_model.serialized_model_proto()
                fi.write(__UpperCamelCase )
        return (out_vocab_file,)

    def lowercase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
        # Build model inputs: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair.
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        A_ = [self.cls_token_id]
        A_ = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_a + sep

    def lowercase_ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ):
        # Mask marking special-token positions (1) vs. sequence tokens (0).
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__UpperCamelCase )) + [1]
        return [1] + ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1]

    def lowercase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
        # Token type ids: all 0 for one sequence; 0s for A-segment, 1s for B-segment.
        A_ = [self.sep_token_id]
        A_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 608 |
import math
def fx(x: float, a: float) -> float:
    """Evaluate f(x) = x**2 - a, whose positive root is sqrt(a)."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """Evaluate f'(x) = 2*x, the derivative of x**2 - a."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Return a starting guess >= sqrt(a) by repeated squaring from 2.0.

    Overshooting guarantees Newton's method converges monotonically from above.
    """
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Approximate sqrt(a) via the Newton-Raphson iteration on f(x) = x**2 - a.

    Args:
        a: non-negative number whose square root is sought.
        max_iter: iteration cap in case the tolerance is never reached.
        tolerance: stop once successive estimates differ by less than this.

    Raises:
        ValueError: if ``a`` is negative (mirrors ``math.sqrt``).
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        # Newton step: x_{n+1} = x_n - f(x_n) / f'(x_n)
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
    # Run this module's doctests when it is executed as a script.
    import doctest

    doctest.testmod()
| 608 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.