"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "naver-clova-ix/donut-base-finetuned-docvqa"
__lowerCAmelCase = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
__lowerCAmelCase = "document_qa"
__lowerCAmelCase = AutoProcessor
__lowerCAmelCase = VisionEncoderDecoderModel
__lowerCAmelCase = ["image", "text"]
__lowerCAmelCase = ["text"]
def __init__( self , *__A , **__A ) -> Tuple:
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> List[str]:
a ='''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
a =task_prompt.replace('''{user_input}''' , __A )
a =self.pre_processor.tokenizer(
__A , add_special_tokens=__A , return_tensors='''pt''' ).input_ids
a =self.pre_processor(__A , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def SCREAMING_SNAKE_CASE ( self , __A ) -> int:
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__A , ).sequences
def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]:
a =self.pre_processor.batch_decode(__A )[0]
a =sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
a =sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
a =re.sub(r'''<.*?>''' , '''''' , __A , count=1 ).strip() # remove first task start token
a =self.pre_processor.tokenajson(__A )
return sequence["answer"]
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An early-exit highway raised: recover its outputs and exit layer.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal if (1 + sqrt(24n + 1)) / 6 is a positive integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """
    Project Euler 44: find a pair of pentagonal numbers whose sum and
    difference are both pentagonal, and return the difference.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
import unittest

from transformers import (
    MODEL_FOR_OBJECT_DETECTION_MAPPING,
    AutoFeatureExtractor,
    AutoModelForObjectDetection,
    ObjectDetectionPipeline,
    is_vision_available,
    pipeline,
)
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_pytesseract,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batched_images = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batched_images, threshold=0.0)

        self.assertEqual(len(batched_images), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
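
# --- Usage sketch (illustrative, not part of the original tests) ---
# The pipeline API exercised above, using the same checkpoint and image as the
# slow tests (the model is downloaded on first run).
if __name__ == "__main__":
    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    for prediction in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
        # Each prediction is {"score": float, "label": str, "box": {xmin, ymin, xmax, ymax}}.
        print(prediction["label"], prediction["score"], prediction["box"])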
import unittest

from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
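
# --- Usage sketch (illustrative, not part of the original module) ---
# Instantiating the config with its defaults yields a base-sized encoder layout.
if __name__ == "__main__":
    config = XLMRobertaConfig()
    print(config.model_type, config.hidden_size, config.num_hidden_layers)  # xlm-roberta 768 12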
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCAmelCase ( *__A , **__A ) -> str:
pass
def _snake_case ( lowercase__ : Image ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = hashlib.mda(image.tobytes() )
return m.hexdigest()[:1_0]
def _snake_case ( lowercase__ : Image ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = np.array(lowercase__ )
lowerCAmelCase_ :str = npimg.shape
return {"hash": hashimage(lowercase__ ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
UpperCAmelCase_ :str = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
UpperCAmelCase_ :int = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __lowerCAmelCase ( self , __A , __A , __A ) -> Dict:
lowerCAmelCase_ :int = MaskGenerationPipeline(model=__A , image_processor=__A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __lowerCAmelCase ( self , __A , __A ) -> List[Any]:
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Dict = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
lowerCAmelCase_ :Optional[Any] = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
# Shortening by hashing
lowerCAmelCase_ :List[str] = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(__A ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_2_1},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_0_5_3},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9_9_6_7},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.9_9_3},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9_9_0_9},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9_8_7_9},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9_8_3_4},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9_7_1_6},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9_6_1_2},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9_5_9_9},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9_5_5_2},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9_5_3_2},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9_5_1_6},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9_4_9_9},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9_4_8_3},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9_4_6_4},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9_4_0_8},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9_3_3_5},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9_3_2_6},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9_2_6_2},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8_9_9_9},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8_9_8_6},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8_9_8_4},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8_8_7_3},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Tuple = """facebook/sam-vit-huge"""
lowerCAmelCase_ :List[str] = pipeline("""mask-generation""" , model=__A )
lowerCAmelCase_ :Tuple = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
lowerCAmelCase_ :Dict = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(__A ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_2_1_0},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_0_5_3},
] , )
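
# --- Usage sketch (illustrative, not part of the original tests) ---
# The mask-generation pipeline exercised above, with the same checkpoint and
# image as the slow tests (the model is large and downloaded on first use).
if __name__ == "__main__":
    generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
    outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
    # `outputs` holds parallel lists of binary masks and their IoU scores.
    print(len(outputs["masks"]), outputs["scores"][0])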
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize this instance, converting any nested `GenerationConfig` into a plain dict."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
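
# --- Usage sketch (illustrative, not part of the original module) ---
# "out" is a placeholder output directory; the generation_* fields control
# `generate()` during evaluation when predict_with_generate=True.
if __name__ == "__main__":
    args = Seq2SeqTrainingArguments(
        output_dir="out",
        predict_with_generate=True,
        generation_max_length=128,
        generation_num_beams=4,
    )
    print(args.to_dict()["generation_num_beams"])  # 4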
import string


def atbash_slow(sequence: str) -> str:
    """Encode/decode with the Atbash cipher using ord() arithmetic."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:  # uppercase A-Z
            output += chr(155 - extract)
        elif 97 <= extract <= 122:  # lowercase a-z
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """Encode/decode with the Atbash cipher using a reversed-alphabet lookup."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Benchmark both implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
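
# --- Quick check (added for illustration): the Atbash cipher mirrors the
# alphabet (A<->Z, B<->Y, ...), so applying it twice returns the original text.
if __name__ == "__main__":
    assert atbash("ABCDEFGH") == "ZYXWVUTS"
    assert atbash(atbash("testStringtest")) == "testStringtest"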
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # Unwrap any decorators applied on top of the original forward.
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk, only on the main process (via `xm.save` on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (keys are upper-cased); they are removed again on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a readable (qualified) name for a function, class or instance."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None) -> bool:
    """Check whether `port` (default 29500) is already in use on localhost."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
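
# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of `patch_environment`; assumes MASTER_PORT is not already
# set in the surrounding environment.
if __name__ == "__main__":
    with patch_environment(master_port="29501"):
        assert os.environ["MASTER_PORT"] == "29501"
    assert "MASTER_PORT" not in os.environ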
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : str = ShapEImgaImgPipeline
A_ : str = ['image']
A_ : int = ['image']
A_ : Tuple = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Tuple = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 8
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.dummy_prior
__lowerCAmelCase : List[Any] = self.dummy_image_encoder
__lowerCAmelCase : int = self.dummy_image_processor
__lowerCAmelCase : Any = self.dummy_renderer
__lowerCAmelCase : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
__lowerCAmelCase : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = output.images[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : int = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
_lowercase = field(metadata={"""help""": """Should contain the data files for the task."""} )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
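# Example invocation (a sketch, not part of the original script -- the script file name,
# data paths, and checkpoint are placeholders; the flags mirror the dataclass fields above):
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path roberta-base \
#       --data_dir ./data/swag \
#       --output_dir ./swag_out \
#       --max_seq_length 128 \
#       --do_train \
#       --do_eval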
| 310
| 0
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87
|
import os
def solution() -> int:
    """Returns the total of all the name scores in the file p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    total_score = 0
    for i, name in enumerate(names):
        name_score = 0
        for letter in name:
            name_score += ord(letter) - 64  # 'A' is 65, so this maps A -> 1, B -> 2, ...
        total_score += (i + 1) * name_score
    return total_score


if __name__ == "__main__":
    print(solution())
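# Worked example from the Project Euler problem statement: "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name alphabetically it contributes
# 938 * 53 = 49714 to the total.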
| 310
| 0
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configpath = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configpath)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
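# Minimal usage sketch (illustrative -- `BertConfig` is just an example stand-in):
# a model's test case instantiates the tester with itself as `parent` plus a concrete
# config class, then runs the shared checks.
#
#     from transformers import BertConfig
#
#     class BertConfigTest(unittest.TestCase):
#         def test_config(self):
#             ConfigTester(self, config_class=BertConfig, hidden_size=37).run_common_tests()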
| 88
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
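# Quick illustration (not part of the original module): with pad_token_id = 0, columns
# that contain only padding are dropped while partially padded columns survive.
#
#     import torch
#     batch = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#     trim_batch(batch, pad_token_id=0)  # tensor([[5, 6], [7, 0]])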
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'wb' ) as f:
return pickle.dump(_lowercase , _lowercase )
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
def remove_articles(_lowercase ):
return re.sub(r'\b(a|an|the)\b' , ' ' , _lowercase )
def white_space_fix(_lowercase ):
return " ".join(text.split() )
def remove_punc(_lowercase ):
__UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) )
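# Example of the normalization chain (illustrative): lowercase, strip punctuation,
# drop articles, then collapse whitespace.
#
#     normalize_answer("The  Quick, Brown Fox!")  # -> "quick brown fox"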
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 310
| 0
|
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if number and number + 2 form a twin prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
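# Illustration: twin_prime(3) returns 5 (3 and 5 are both prime), while twin_prime(4)
# returns -1 because 4 is not prime.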
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 310
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
| 90
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
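# Illustration: a root holding all three coins must send one coin to each child,
# so two moves are needed.
#
#     distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))  # -> 2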
if __name__ == "__main__":
import doctest
doctest.testmod()
| 91
|
def merge_sort(collection: list) -> list:
    """Pure implementation of the merge sort algorithm in Python."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
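# Illustration: merge_sort([5, 2, 4, 1]) splits into [5, 2] and [4, 1], sorts each half
# recursively, then merges the sorted halves: merge([2, 5], [1, 4]) -> [1, 2, 4, 5].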
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = input('''Enter numbers separated by a comma:\n''').strip()
__snake_case = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 310
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 92
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 310
| 0
|
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 93
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
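# Minimal usage sketch (illustrative; the checkpoint name is an example, and `image`
# stands for any PIL image):
#
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#     # inputs now carries "input_ids", "attention_mask" and "pixel_values"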
| 310
| 0
|
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Get the Next Greatest Element (NGE) for each element by scanning every
    element to its right (O(n^2))."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like next_greatest_element_slow, but iterating over slices (still O(n^2))."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Get the NGE for each element with a monotonic stack (O(n))."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
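# Walkthrough of the stack version (illustrative): scanning [2, 1, 3] right to left,
# the stack keeps only candidates greater than the current element, giving [3, 3, -1].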
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
snake_case : List[str] = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 94
|
import math
def proth(number: int) -> int:
    """Return the nth Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of doubling blocks needed to reach the nth entry
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
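# Reference values (Proth numbers have the form k * 2**n + 1 with odd k < 2**n):
# proth(1) -> 3, proth(2) -> 5, proth(3) -> 9, proth(4) -> 13, proth(5) -> 17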
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
__snake_case = 0
try:
__snake_case = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 310
| 0
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    target = randint(-5000, 5000)
    return (arr, target)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: check every 3-permutation of the array for the target sum."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort the array, then move two pointers inward for each fixed first element."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
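# Two-pointer walkthrough (illustrative): with arr = [1, 2, 4, 8] and target = 13,
# fixing arr[0] = 1 and scanning from both ends finds 1 + 4 + 8 = 13 -> (1, 4, 8).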
def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase : Union[str, Any] = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 95
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Find scores of the start and end tokens of entities in `W_query`,
        given the support sequences in `W_supports`."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 310
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 96
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 310
| 0
|
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Remove the pegasus newline token and insert a newline after each sentence."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 97
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_a)  # `_a` holds the RAG config docstring defined above
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"

        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
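# Illustrative usage sketch (assumes `transformers` is installed; the DPR/BART
# sub-config choices below are examples, not requirements): compose a RagConfig
# from sub-configs via the classmethod above and round-trip it through to_dict().
from transformers import BartConfig, DPRConfig, RagConfig

question_encoder_config = DPRConfig()
generator_config = BartConfig()
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)
assert rag_config.to_dict()["question_encoder"]["model_type"] == "dpr"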
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,              # not supported for now
    # '': get_constant_schedule_with_warmup,  # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    # Initializes the retriever only on the master worker (used with RAY).
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # Flags model parameters that did not receive a gradient during the backward pass.
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")

    return trainer
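# Illustrative usage sketch (the tiny parameter/optimizer below is hypothetical):
# the `arg_to_scheduler` mapping above is consumed by looking up the factory for
# the chosen CLI value, then stepping the schedule once per optimizer step.
import torch

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.AdamW(params, lr=5e-5)
get_schedule_func = arg_to_scheduler["linear"]
lr_schedule = get_schedule_func(optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(5):
    optimizer.step()
    lr_schedule.step()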
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool the token embeddings, masking out padding positions.
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
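# Illustrative usage sketch (assumes `transformers` is installed; the weights
# here are randomly initialized, whereas in practice they would come from a
# pretrained M-CLIP checkpoint; "xlm-roberta-base" is only used for its tokenizer):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
batch = tokenizer(["a photo of a dog"], return_tensors="pt")
model = MultilingualCLIP(MCLIPConfig(vocab_size=tokenizer.vocab_size))
projected, token_embeddings = model(batch["input_ids"], batch["attention_mask"])
print(projected.shape)  # (1, config.numDims), the CLIP image-embedding dimensionality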
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """
    Output class for ShapEImg2ImgPipeline.

    Args:
        images (`torch.FloatTensor`):
            A list of images for 3D rendering.
    """

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(
        self,
        image,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
    ):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator=None,
        latents=None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                "`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or"
                f" `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            images.append(
                self.renderer.decode(
                    latent[None, :],
                    device,
                    size=frame_size,
                    ray_batch_size=4096,
                    n_coarse_samples=64,
                    n_fine_samples=128,
                )
            )

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """
        Every array in the list is normalized to have zero mean and unit variance.
        """
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(
        self,
        one_waveform: np.ndarray,
    ) -> np.ndarray:
        """
        Extracts log-mel filterbank features for one waveform array (unbatched).
        """
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
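# Illustrative usage sketch (the 1-second random-noise waveform is hypothetical):
# extract log-mel targets for a 16 kHz mono signal with the extractor above.
import numpy as np

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16000).astype(np.float32)
features = extractor(audio_target=waveform, sampling_rate=16000)
print(features["input_values"][0].shape)  # (num_frames, num_mel_bins)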
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
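# Reference sketch: `calculate_bleu` comes from the local `utils` module; it is
# roughly a thin wrapper around sacrebleu's corpus BLEU along these lines (a
# sketch under that assumption, not the exact implementation):
from sacrebleu import corpus_bleu

def calculate_bleu_sketch(output_lns, refs_lns):
    # corpus_bleu expects a list of hypotheses and a list of reference streams
    return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}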
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
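# Illustrative usage sketch (assumes `transformers` provides MegatronBertModel;
# the deliberately tiny hyperparameters below are hypothetical): build a small
# config and a randomly initialized model from it.
from transformers import MegatronBertModel

tiny_config = MegatronBertConfig(
    vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128
)
tiny_model = MegatronBertModel(tiny_config)
print(tiny_model.config.position_embedding_type)  # "absolute"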
def hexagonal_numbers(length: int) -> list[int]:
    """
    Returns a list of the first `length` hexagonal numbers, where the n-th
    hexagonal number is h_n = n * (2n - 1).
    """
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
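# Quick property check (illustrative addition): consecutive hexagonal numbers
# h_n = n * (2n - 1) differ by 4n + 1, which gives a simple sanity test for the
# list comprehension above.
nums = hexagonal_numbers(length=6)  # [0, 1, 6, 15, 28, 45]
assert all(b - a == 4 * i + 1 for i, (a, b) in enumerate(zip(nums, nums[1:])))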
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
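# Illustrative usage sketch (assumes `transformers` is installed): with the
# _LazyModule redirection above, a submodule is imported only when one of its
# attributes is first accessed.
from transformers.models import rembert

config_cls = rembert.RemBertConfig  # triggers the configuration_rembert import
print(config_cls.model_type)  # "rembert"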
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Union[str, Any],A_: List[str] ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = RobertaEmbeddings(A_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Any,A_: int ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = config.num_labels
__UpperCamelCase = config.num_hidden_layers
__UpperCamelCase = DeeRobertaModel(A_ )
__UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase = nn.Linear(config.hidden_size,self.config.num_labels )
@add_start_docstrings_to_model_forward(A_ )
def snake_case_ ( self: List[str],A_: int=None,A_: List[Any]=None,A_: List[str]=None,A_: List[str]=None,A_: Optional[int]=None,A_: List[str]=None,A_: Any=None,A_: List[Any]=-1,A_: List[Any]=False,):
'''simple docstring'''
__UpperCamelCase = self.num_layers
try:
__UpperCamelCase = self.roberta(
A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,)
__UpperCamelCase = outputs[1]
__UpperCamelCase = self.dropout(A_ )
__UpperCamelCase = self.classifier(A_ )
__UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase = e.message
__UpperCamelCase = e.exit_layer
__UpperCamelCase = outputs[0]
if not self.training:
__UpperCamelCase = entropy(A_ )
__UpperCamelCase = []
__UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
# work with highway exits
__UpperCamelCase = []
for highway_exit in outputs[-1]:
__UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
__UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase = (loss,) + outputs
if not self.training:
__UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
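# A hypothetical inference sketch for the early-exit classifier above, mirroring how
# upstream DeeBERT-style models are typically driven. Keyword names and output indexing
# are assumptions for illustration, not the exact mangled signature in this file.
def early_exit_predict_sketch(model, tokenizer, text, output_layer=-1):
    import torch

    model.eval()
    inputs = tokenizer(text, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs, output_layer=output_layer)
    logits = outputs[0]  # assumes no labels were passed, so no loss is prepended
    return logits.argmax(dim=-1).item()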
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester :
"""simple docstring"""
def __init__( self : str ,lowercase__ : int ,lowercase__ : List[str]=1_3 ,lowercase__ : List[str]=1_0 ,lowercase__ : int=3 ,lowercase__ : Tuple=2 ,lowercase__ : Union[str, Any]=2 ,lowercase__ : List[str]=2 ,lowercase__ : List[Any]=True ,lowercase__ : Any=True ,lowercase__ : Optional[int]=3_2 ,lowercase__ : List[str]=5 ,lowercase__ : Tuple=4 ,lowercase__ : str=3_7 ,lowercase__ : List[Any]="gelu" ,lowercase__ : Dict=0.1 ,lowercase__ : Any=0.1 ,lowercase__ : str=1_0 ,lowercase__ : Any=0.0_2 ,lowercase__ : Tuple=0.9 ,lowercase__ : Tuple=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = tubelet_size
__lowercase = num_frames
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = mask_ratio
__lowercase = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__lowercase = (image_size // patch_size) ** 2
__lowercase = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__lowercase = int(mask_ratio * self.seq_length )
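        # Worked numbers with the defaults above (illustrative): image_size=10 and
        # patch_size=2 give (10 // 2) ** 2 = 25 patches per frame; num_frames=2 and
        # tubelet_size=2 give (2 // 2) * 25 = 25 tokens, of which int(0.9 * 25) = 22
        # are masked.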
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return VideoMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,tubelet_size=self.tubelet_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,)
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ,lowercase__ : Any ,lowercase__ : Optional[Any] ):
__lowercase = VideoMAEModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Optional[Any] ,lowercase__ : Any ,lowercase__ : Any ):
__lowercase = VideoMAEForPreTraining(lowercase__ )
model.to(lowercase__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__lowercase = torch.ones((self.num_masks,) )
__lowercase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
__lowercase = mask.expand(self.batch_size ,-1 ).bool()
__lowercase = model(lowercase__ ,lowercase__ )
# model only returns predictions for masked patches
__lowercase = mask.sum().item()
__lowercase = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_masked_patches, decoder_num_labels) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE : int = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = VideoMAEModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,has_text_modality=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : List[str]=False ):
__lowercase = copy.deepcopy(lowercase__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__lowercase = torch.ones((self.model_tester.num_masks,) )
__lowercase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
__lowercase = mask.expand(self.model_tester.batch_size ,-1 ).bool()
__lowercase = bool_masked_pos.to(lowercase__ )
if return_labels:
if model_class in [
*get_values(lowercase__ ),
]:
__lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ ,nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = VideoMAEModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
if not self.has_attentions:
pass
else:
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
for model_class in self.all_model_classes:
__lowercase = self.model_tester.seq_length - self.model_tester.num_masks
__lowercase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
__lowercase = outputs.attentions
self.assertEqual(len(lowercase__ ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
__lowercase = outputs.attentions
self.assertEqual(len(lowercase__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
__lowercase = len(lowercase__ )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
self.assertEqual(out_len + 1 ,len(lowercase__ ) )
__lowercase = outputs.attentions
self.assertEqual(len(lowercase__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def SCREAMING_SNAKE_CASE ( self : Dict ):
def check_hidden_states_output(lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : int ):
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
__lowercase = outputs.hidden_states
__lowercase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase__ ) ,lowercase__ )
__lowercase = self.model_tester.seq_length - self.model_tester.num_masks
__lowercase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def prepare_video():
    """simple docstring"""
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
lowercase__ )
__lowercase = self.default_image_processor
__lowercase = prepare_video()
__lowercase = image_processor(lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
__lowercase = model(**lowercase__ )
# verify the logits
__lowercase = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape ,lowercase__ )
__lowercase = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowercase__ ,atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(lowercase__ )
__lowercase = self.default_image_processor
__lowercase = prepare_video()
__lowercase = image_processor(lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
# add boolean mask, indicating which patches to mask
__lowercase = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' ,filename='''bool_masked_pos.pt''' )
__lowercase = torch.load(lowercase__ )
# forward pass
with torch.no_grad():
__lowercase = model(**lowercase__ )
# verify the logits
__lowercase = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__lowercase = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] ,device=lowercase__ )
self.assertEqual(outputs.logits.shape ,lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,lowercase__ ,atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
__lowercase = torch.tensor([0.5_1_4_2] ,device=lowercase__ )
self.assertTrue(torch.allclose(outputs.loss ,lowercase__ ,atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
__lowercase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ,norm_pix_loss=lowercase__ ).to(
lowercase__ )
with torch.no_grad():
__lowercase = model(**lowercase__ )
        __lowercase = torch.tensor([0.6_4_6_9] ,device=lowercase__ )
self.assertTrue(torch.allclose(outputs.loss ,lowercase__ ,atol=1e-4 ) )
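# Standalone illustration of the boolean-mask construction used in the pre-training
# checks above (the sizes are made up for the example):
def _bool_mask_demo():
    import torch

    num_masks, seq_length, batch_size = 22, 25, 2
    mask = torch.ones((num_masks,))
    mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
    bool_masked_pos = mask.expand(batch_size, -1).bool()
    assert bool_masked_pos.shape == (batch_size, seq_length)
    return bool_masked_pos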
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def snake_case_ ( *A_: Optional[Any],**A_: Tuple ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase (unittest.TestCase ):
_lowercase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
import datasets
__UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )
__UpperCamelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__UpperCamelCase = object_detector(A_,threshold=0.0 )
self.assertEqual(len(A_ ),len(A_ ) )
for outputs in batch_outputs:
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
],threshold=0.0,)
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
@require_torch
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 0.9_9_8_5
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd'
__UpperCamelCase = 0.9_9_9_3
__UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ )
__UpperCamelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
"""simple docstring"""
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __lowerCamelCase (_a ):
_lowercase = """xlm-roberta"""
def __init__( self: Union[str, Any],A_: Union[str, Any]=3_0522,A_: Dict=768,A_: Union[str, Any]=12,A_: Any=12,A_: str=3072,A_: Union[str, Any]="gelu",A_: str=0.1,A_: Optional[int]=0.1,A_: List[Any]=512,A_: Optional[Any]=2,A_: Dict=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=1,A_: str=0,A_: str=2,A_: Optional[Any]="absolute",A_: Union[str, Any]=True,A_: int=None,**A_: Optional[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
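# A short usage sketch via the upstream class this configuration mirrors; the values
# shown are simply the defaults from __init__ above.
def _xlm_roberta_config_demo_sketch():
    from transformers import XLMRobertaConfig

    config = XLMRobertaConfig(vocab_size=30522, hidden_size=768)
    return config.model_type  # 'xlm-roberta'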
"""simple docstring"""
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( matrix ):
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
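# Quick sanity check for the routine above; the grid is illustrative and the cheapest
# right/down path costs 1 + 3 + 1 + 1 + 1 = 7.
def _min_path_demo():
    example = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    return __SCREAMING_SNAKE_CASE(example)  # 7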
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase (_a ):
_lowercase = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(A_,A_ ):
__UpperCamelCase = v.to_dict()
return d
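# Illustrative construction of the dataclass above via the upstream name it mirrors
# (Seq2SeqTrainingArguments); the field values are arbitrary.
def _seq2seq_args_demo_sketch():
    from transformers import Seq2SeqTrainingArguments

    args = Seq2SeqTrainingArguments(
        output_dir='out', predict_with_generate=True, generation_num_beams=4 )
    return args.to_dict()['generation_num_beams']  # 4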
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]=13 , __lowerCamelCase : Tuple=10 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : Any=5 , __lowerCamelCase : str=4 , __lowerCamelCase : Optional[Any]=37 , __lowerCamelCase : str="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Dict=10 , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : Optional[int]="divided_space_time" , __lowerCamelCase : Tuple=None , ) -> Optional[int]:
a = parent
a = batch_size
a = image_size
a = num_channels
a = patch_size
a = num_frames
a = is_training
a = use_labels
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = attention_type
a = initializer_range
a = scope
a = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
a = (image_size // patch_size) ** 2
a = (num_frames) * self.num_patches_per_frame + 1
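        # Worked numbers with the defaults above (illustrative): image_size=10 and
        # patch_size=2 give (10 // 2) ** 2 = 25 patches per frame, so with num_frames=2
        # the sequence holds 2 * 25 + 1 = 51 tokens including the CLS token.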
def __UpperCAmelCase ( self : Dict ) -> Dict:
a = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.num_labels )
a = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Any ) -> List[str]:
a = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
a = self.num_labels
return config
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ) -> Optional[Any]:
a = TimesformerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Dict ) -> Tuple:
a = TimesformerForVideoClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
# verify the logits shape
a = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : str = False
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = TimesformerModelTester(self )
a = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=False ) -> Any:
a = copy.deepcopy(__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def __UpperCAmelCase ( self : List[Any] ) -> int:
pass
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def __UpperCAmelCase ( self : int ) -> Tuple:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCamelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> str:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__lowerCamelCase )
@slow
def __UpperCAmelCase ( self : int ) -> List[str]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = TimesformerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if not self.has_attentions:
pass
else:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = True
for model_class in self.all_model_classes:
a = self.model_tester.seq_length
a = self.model_tester.num_frames
a = True
a = False
a = True
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a = True
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
a = len(__lowerCamelCase )
# Check attention is always last and order is fine
a = True
a = True
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(__lowerCamelCase ) )
a = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Any ):
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a = outputs.hidden_states
a = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
a = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def prepare_video():
    '''simple docstring'''
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self : int ) -> List[Any]:
a = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
__lowerCamelCase )
a = self.default_image_processor
a = prepare_video()
a = image_processor(video[:8] , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
a = model(**__lowerCamelCase )
# verify the logits
a = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
a = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
if is_torch_version('<' , '2.0.0' ) or not hasattr(_lowercase , '_dynamo' ):
return False
return isinstance(_lowercase , torch._dynamo.eval_frame.OptimizedModule )
def _A ( _lowercase , _lowercase = True ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__UpperCamelCase = is_compiled_module(_lowercase )
if is_compiled:
__UpperCamelCase = model
__UpperCamelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_lowercase , _lowercase ):
__UpperCamelCase = model.module
if not keep_fpaa_wrapper:
__UpperCamelCase = getattr(_lowercase , 'forward' )
__UpperCamelCase = model.__dict__.pop('_original_forward' , _lowercase )
if original_forward is not None:
while hasattr(_lowercase , '__wrapped__' ):
__UpperCamelCase = forward.__wrapped__
if forward == original_forward:
break
__UpperCamelCase = forward
if getattr(_lowercase , '_converted_to_transformer_engine' , _lowercase ):
convert_model(_lowercase , to_transformer_engine=_lowercase )
if is_compiled:
__UpperCamelCase = model
__UpperCamelCase = compiled_model
return model
def _A ( ) -> Any:
"""simple docstring"""
PartialState().wait_for_everyone()
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_lowercase , _lowercase )
elif PartialState().local_process_index == 0:
torch.save(_lowercase , _lowercase )
@contextmanager
def _A ( **_lowercase ) -> Union[str, Any]:
"""simple docstring"""
for key, value in kwargs.items():
__UpperCamelCase = str(_lowercase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
if not hasattr(_lowercase , '__qualname__' ) and not hasattr(_lowercase , '__name__' ):
__UpperCamelCase = getattr(_lowercase , '__class__' , _lowercase )
if hasattr(_lowercase , '__qualname__' ):
return obj.__qualname__
if hasattr(_lowercase , '__name__' ):
return obj.__name__
return str(_lowercase )
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
for key, value in source.items():
if isinstance(_lowercase , _lowercase ):
__UpperCamelCase = destination.setdefault(_lowercase , {} )
merge_dicts(_lowercase , _lowercase )
else:
__UpperCamelCase = value
return destination
def _A ( _lowercase = None ) -> bool:
"""simple docstring"""
if port is None:
__UpperCamelCase = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
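# A sketch of how the port probe above can be used to pick a launch port (29500 is
# the conventional default for torch distributed; the search range is arbitrary):
def _find_free_port_sketch(start=29500, attempts=10):
    for candidate in range(start, start + attempts):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            if s.connect_ex(('localhost', candidate)) != 0:
                return candidate
    raise RuntimeError('no free port found in range')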
"""simple docstring"""
from __future__ import annotations
def a__ ( SCREAMING_SNAKE_CASE : list[int] ):
'''simple docstring'''
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
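# Illustrative checks (arbitrary inputs): one list with all-distinct elements, one with
# a duplicate.
assert a__([1, 2, 3] )
assert not a__([1, 2, 2] )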
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
def simple_accuracy( preds , labels ) -> float:
    """simple docstring"""
    return (preds == labels).mean()
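# For example, simple_accuracy(np.array([1, 0, 1]) , np.array([1, 1, 1])) is 2 / 3.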
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
_lowercase = field(metadata={"""help""": """Should contain the data files for the task."""} )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
try:
__UpperCamelCase = processors[data_args.task_name]()
__UpperCamelCase = processor.get_labels()
__UpperCamelCase = len(_lowercase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(_lowercase ) -> Dict:
__UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_lowercase , p.label_ids )}
# Data collator
    __UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
return results
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
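# Illustrative invocation of this script (paths and the model name are placeholders):
#   python run_multiple_choice.py --task_name swag --model_name_or_path roberta-base \
#     --data_dir ./data/swag --output_dir ./out --do_train --do_eval --max_seq_length 80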
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = hidden_states.shape
UpperCAmelCase : str = jax.image.resize(
_SCREAMING_SNAKE_CASE , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
UpperCAmelCase : Optional[int] = self.conv(_SCREAMING_SNAKE_CASE )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Tuple = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
UpperCAmelCase : Any = self.conv(_SCREAMING_SNAKE_CASE )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int = None
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : bool = None
__lowerCAmelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels
UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
UpperCAmelCase : Optional[Any] = nn.Conv(
_SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCAmelCase : str = nn.Dense(_SCREAMING_SNAKE_CASE , dtype=self.dtype )
UpperCAmelCase : str = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
UpperCAmelCase : List[str] = nn.Dropout(self.dropout_prob )
UpperCAmelCase : str = nn.Conv(
_SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCAmelCase : Tuple = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
UpperCAmelCase : Dict = None
if use_nin_shortcut:
UpperCAmelCase : str = nn.Conv(
_SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = hidden_states
UpperCAmelCase : Dict = self.norma(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = nn.swish(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = self.conva(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = self.time_emb_proj(nn.swish(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase : Optional[int] = jnp.expand_dims(jnp.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , 1 )
UpperCAmelCase : Union[str, Any] = hidden_states + temb
UpperCAmelCase : Any = self.norma(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = nn.swish(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = self.dropout(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = self.conva(_SCREAMING_SNAKE_CASE )
if self.conv_shortcut is not None:
UpperCAmelCase : Dict = self.conv_shortcut(_SCREAMING_SNAKE_CASE )
return hidden_states + residual
import os
def solution() -> int:
    """simple docstring"""
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
    names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def lowerCamelCase ( self : Any ):
        model = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
        features = {
            """input_ids""": tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    """simple docstring"""
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    """simple docstring"""
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> dict:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
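

# Usage sketch (paths are placeholders): given a directory containing
# train.source/train.target files,
#   ds = Seq2SeqDataset(tokenizer, "data_dir", max_source_length=128, max_target_length=64)
#   loader = torch.utils.data.DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)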
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
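

# Worked example (illustrative): "the cat sat" vs "cat sat down" normalize to
# token lists sharing "cat" and "sat", so num_same = 2, precision = 2/2,
# recall = 2/3 and f1_score returns 2 * 1 * (2/3) / (1 + 2/3) = 0.8.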
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler 86: return the least value of the greatest cuboid side M for
    which the number of cuboids with an integer shortest path first exceeds
    `limit`.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
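

# Sanity check from the problem statement of Project Euler 86: M = 100 is the
# least cuboid size for which the cumulative count first exceeds two thousand,
# so solution(1999) should return 100.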
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
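

# Instantiation sketch (illustrative): arguments fall back to the defaults above,
#   config = YolosConfig(num_detection_tokens=100)
#   config.image_size  ->  [512, 864]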
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A : Any = logging.get_logger(__name__)
class A (_a ):
'''simple docstring'''
__lowerCamelCase : str = ['''pixel_values''']
def __init__( self : Optional[Any] , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Dict[str, int]] = None , __lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[int, float] = 1 / 2_55 , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , **__lowerCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**A_ )
A__ = size if size is not None else {"""height""": 2_24, """width""": 2_24}
A__ = get_size_dict(A_ )
A__ = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
A__ = get_size_dict(A_ , default_to_square=A_ , param_name="""crop_size""" )
A__ = do_resize
A__ = do_rescale
A__ = do_normalize
A__ = do_center_crop
A__ = crop_size
A__ = size
A__ = resample
A__ = rescale_factor
A__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a_ ( self : List[Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Dict , ) -> Any:
"""simple docstring"""
A__ = get_size_dict(A_ )
if "shortest_edge" in size:
A__ = get_resize_output_image_size(A_ , size=size["""shortest_edge"""] , default_to_square=A_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
A__ = (size["""height"""], size["""width"""])
else:
raise ValueError(f'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def a_ ( self : Any , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : int , ) -> List[str]:
"""simple docstring"""
A__ = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(A_ , size=(size["""height"""], size["""width"""]) , data_format=A_ , **A_ )
def a_ ( self : Tuple , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : float , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def a_ ( self : Optional[int] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Union[float, List[float]] , __lowerCAmelCase : Union[float, List[float]] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Optional[Any] , ) -> Tuple:
"""simple docstring"""
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def a_ ( self : int , __lowerCAmelCase : ImageInput , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : int = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[float] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowerCAmelCase : Dict , ) -> List[Any]:
"""simple docstring"""
A__ = do_resize if do_resize is not None else self.do_resize
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(A_ , param_name="""crop_size""" , default_to_square=A_ )
A__ = resample if resample is not None else self.resample
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(A_ )
if not is_batched(A_ ):
A__ = [images]
if not valid_images(A_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(A_ ) for image in images]
if do_resize:
A__ = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
A__ = [self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
A__ = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
A__ = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
A__ = [to_channel_dimension_format(A_ , A_ ) for image in images]
A__ = {"""pixel_values""": images}
return BatchFeature(data=A_ , tensor_type=A_ )
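

# Note (sketch): preprocess applies resize -> center_crop -> rescale -> normalize
# in that order, gated by the boolean flags validated above, then packs the
# result into a BatchFeature under the "pixel_values" key.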
def merge_sort(collection: list) -> list:
    """Pure implementation of merge sort in Python."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into one sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
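

# Doctest-style examples (illustrative):
#   merge_sort([0, 5, 3, 2, 2])  ->  [0, 2, 2, 3, 5]
#   merge_sort([])               ->  []
#   merge_sort([-2, -5, -45])    ->  [-45, -5, -2]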
'''simple docstring'''
from manim import *
class a ( _a ):
def __UpperCAmelCase ( self ) -> List[str]:
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.2_5 , width=0.2_5 )
_a = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
_a = Text('CPU' , font_size=24 )
_a = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = Text('GPU' , font_size=24 )
_a = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = Text('Model' , font_size=24 )
_a = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
_a = []
_a = []
_a = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
_a = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=A_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=A_ , buff=0.0 )
self.add(A_ )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ , *A_ )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = Text('Loaded Checkpoint' , font_size=24 )
_a = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(A_ )
_a = []
_a = []
for i, rect in enumerate(A_ ):
_a = fill.copy().set_fill(A_ , opacity=0.7 )
target.move_to(A_ )
ckpt_arr.append(A_ )
_a = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
_a = MarkupText(
f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
_a = [meta_mem.copy() for i in range(6 )]
_a = [meta_mem.copy() for i in range(6 )]
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = VGroup(*A_ ).arrange(A_ , buff=0 )
_a = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
_a = Text('Disk' , font_size=24 )
_a = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(A_ , run_time=3 ) , Write(A_ , run_time=1 ) , Create(A_ , run_time=1 ) )
_a = []
for i, rect in enumerate(A_ ):
_a = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(A_ , run_time=1.5 ) )
self.play(*A_ )
self.play(FadeOut(A_ ) )
_a = MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
self.play(
FadeOut(A_ , A_ , *A_ , *A_ ) , )
self.wait()
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
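

# Design note (sketch): KwargsHandler.to_kwargs() compares each dataclass field
# against its declared default and returns only the overridden ones, which is
# why MockClass().to_kwargs() above is an empty dict.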
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration class for the Transformer-XL model."""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
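

# Usage sketch (illustrative): the attribute_map above lets generic code read
# `config.hidden_size` while the value is stored as `d_model`:
#   config = TransfoXLConfig(d_model=512)
#   config.hidden_size  ->  512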
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """OwlViTImageProcessor"""
_lowercase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self: int,A_: Tuple=None,A_: int=None,**A_: int ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.',A_,)
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_,A_ )
def __call__( self: str,A_: Dict=None,A_: Optional[int]=None,A_: Any=None,A_: Tuple="max_length",A_: int="np",**A_: Optional[Any] ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(A_,A_ ) or (isinstance(A_,A_ ) and not isinstance(text[0],A_ )):
__UpperCamelCase = [self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )]
elif isinstance(A_,A_ ) and isinstance(text[0],A_ ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(A_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A_ ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(A_ ))
__UpperCamelCase = self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )
encodings.append(A_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings],dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings],dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings],axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,**A_ ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: Optional[int],*A_: int,**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process(*A_,**A_ )
def snake_case_ ( self: str,*A_: Optional[int],**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*A_,**A_ )
def snake_case_ ( self: str,*A_: Tuple,**A_: int ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*A_,**A_ )
def snake_case_ ( self: List[str],*A_: str,**A_: List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Any,**A_: Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',A_,)
return self.image_processor_class
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',A_,)
return self.image_processor
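

# Note (sketch): in __call__ above, each sample's text queries are padded with
# blank strings up to the batch-wide maximum number of queries, so the stacked
# input_ids share one shape across the batch.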
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
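

# CLI sketch via python-fire (illustrative model name and path):
#   python save_len_file.py --tokenizer_name facebook/bart-base --data_dir ./cnn_dm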
import math
def proth(number: int) -> int:
    """
    Return the `number`-th Proth number (positive integers of the form
    k * 2^n + 1 with k odd and 2^n > k): 3, 5, 9, 13, 17, 25, ...
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers are generated in blocks whose size doubles each time.
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen can be placed at board[row][column] safely."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row; record and print each completed board."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each query token against the start/end entity-marker tokens of the supports."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
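

# Shape note (sketch): p_starts/p_ends stack one per-query distribution over the
# support start/end marker tokens for each example in the batch.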
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt (float based)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
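

# Illustrative checks for the binary-search version:
#   perfect_square_binary_search(16)  ->  True
#   perfect_square_binary_search(26)  ->  False
# Unlike the float-based check above, it is immune to rounding error.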
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = BioGptTokenizer
_lowercase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file,'w' ) as fp:
fp.write(json.dumps(A_ ) )
with open(self.merges_file,'w' ) as fp:
fp.write('\n'.join(A_ ) )
def snake_case_ ( self: Optional[int],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'lower newer'
__UpperCamelCase = 'lower newer'
return input_text, output_text
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer(self.vocab_file,self.merges_file )
__UpperCamelCase = 'lower'
__UpperCamelCase = ['low', 'er</w>']
__UpperCamelCase = tokenizer.tokenize(A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokens + ['<unk>']
__UpperCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ),A_ )
@slow
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__UpperCamelCase = tokenizer.encode('sequence builders',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.encode('multi-sequence build',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_,A_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
class SubArray:
    def __init__(self, arr):
        # Convert a string such as "1,2,3" into a list of element strings.
        self.array = arr.split(",")

    def solve_sub_array(self):
        """Return the maximum contiguous subarray sum (Kadane-style DP)."""
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
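

# Worked example (illustrative): for the input string "1,-2,3,4" the maximum
# contiguous sum is 3 + 4 = 7, so SubArray("1,-2,3,4").solve_sub_array() == 7.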
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_a )
class __lowerCamelCase (_a ):
_lowercase = """rag"""
_lowercase = True
def __init__( self: Tuple,A_: Any=None,A_: Any=True,A_: List[Any]=None,A_: Optional[int]=None,A_: List[Any]=None,A_: str=None,A_: Union[str, Any]=None,A_: List[Any]=" / ",A_: Union[str, Any]=" // ",A_: List[Any]=5,A_: Optional[int]=300,A_: Tuple=768,A_: Tuple=8,A_: Optional[Any]="wiki_dpr",A_: int="train",A_: Union[str, Any]="compressed",A_: Optional[int]=None,A_: List[Any]=None,A_: List[str]=False,A_: List[str]=False,A_: str=0.0,A_: List[Any]=True,A_: Tuple=False,A_: int=False,A_: Dict=False,A_: Tuple=True,A_: int=None,**A_: Optional[int],):
'''simple docstring'''
super().__init__(
bos_token_id=A_,pad_token_id=A_,eos_token_id=A_,decoder_start_token_id=A_,forced_eos_token_id=A_,is_encoder_decoder=A_,prefix=A_,vocab_size=A_,**A_,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__UpperCamelCase = kwargs.pop('question_encoder' )
__UpperCamelCase = question_encoder_config.pop('model_type' )
__UpperCamelCase = kwargs.pop('generator' )
__UpperCamelCase = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = reduce_loss
__UpperCamelCase = label_smoothing
__UpperCamelCase = exclude_bos_score
__UpperCamelCase = do_marginalize
__UpperCamelCase = title_sep
__UpperCamelCase = doc_sep
__UpperCamelCase = n_docs
__UpperCamelCase = max_combined_length
__UpperCamelCase = dataset
__UpperCamelCase = dataset_split
__UpperCamelCase = index_name
__UpperCamelCase = retrieval_vector_size
__UpperCamelCase = retrieval_batch_size
__UpperCamelCase = passages_path
__UpperCamelCase = index_path
__UpperCamelCase = use_dummy_dataset
__UpperCamelCase = output_retrieved
__UpperCamelCase = do_deduplication
__UpperCamelCase = use_cache
if self.forced_eos_token_id is None:
__UpperCamelCase = getattr(self.generator,'forced_eos_token_id',A_ )
@classmethod
def snake_case_ ( cls: Any,A_: PretrainedConfig,A_: PretrainedConfig,**A_: int ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(),generator=generator_config.to_dict(),**A_ )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.question_encoder.to_dict()
__UpperCamelCase = self.generator.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
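

# Design note (sketch): RagConfig is a composite configuration; the classmethod
# from_question_encoder_generator_configs builds it from two sub-configs, and
# to_dict re-serializes them under the "question_encoder" and "generator" keys.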
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Recursive 0/1 knapsack: best achievable value using items from `index`
    onwards with `max_weight` capacity remaining.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
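

# Worked example (illustrative): weights [1, 3, 4], values [15, 20, 30] and
# capacity 4 -> the best choice is items 0 and 1 (weight 4, value 35):
#   knapsack([1, 3, 4], [15, 20, 30], 3, 4, 0) == 35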
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool token embeddings with the attention mask, then project.
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
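

# Usage sketch (illustrative):
#   config = MCLIPConfig(transformerDimSize=1024, imageDimSize=768)
#   model = MultilingualCLIP(config)
#   text_features, token_embs = model(input_ids, attention_mask)
# The first output lives in the image-embedding space; the second is the raw
# per-token hidden states.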
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __lowerCamelCase :
_lowercase = XGLMConfig
_lowercase = {}
_lowercase = """gelu"""
def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = ffn_dim
__UpperCamelCase = activation_function
__UpperCamelCase = activation_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = 2
__UpperCamelCase = 1
def snake_case_ ( self: Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = self.get_config()
__UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,)
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
),
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 310
| 0
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def b2mb(x):
    """Converts bytes to megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager that tracks the GPU memory allocated inside the ``with`` block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 348
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()  # fp16 on GPU speeds up these slow BLEU evaluations
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 310
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '''▁'''
_A = {'''vocab_file''': '''sentencepiece.bpe.model'''}
_A = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
_A = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1_024,
}
# fmt: off
_A = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
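# Illustrative usage (assumes access to the model hub; not part of the original module):
# tokenizer = MBart50Tokenizer.from_pretrained(
#     "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
# )
# enc = tokenizer("Hello", return_tensors="pt")  # input_ids start with the en_XX language code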
| 278
|
def hexagonal_numbers(length: int) -> list[int]:
    """
    Returns the first `length` hexagonal numbers, where the n-th hexagonal
    number is given by h(n) = n * (2n - 1).
    """
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
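# For example (illustrative): hexagonal_numbers(5) -> [0, 1, 6, 15, 28]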
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
| 310
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 274
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 310
| 0
|
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give XLNet and Transformer-XL models more context for short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
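    # Illustrative usage of this pipeline (names assumed; not part of the original module):
    # generator = pipeline("text-generation", model="gpt2")
    # generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)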
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__="" , __magic_name__=None , **__magic_name__ ) -> int:
_a = self.tokenizer(
prefix + prompt_text , padding=A_ , add_special_tokens=A_ , return_tensors=self.framework )
_a = prompt_text
if handle_long_generation == "hole":
_a = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a = generate_kwargs['max_new_tokens']
else:
_a = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
_a = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_a = inputs['attention_mask'][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
| 168
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 310
| 0
|
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__snake_case = datasets.logging.get_logger(__name__)
__snake_case = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
__snake_case = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
__snake_case = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 97
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
],threshold=0.0,)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
| 310
| 0
|
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    """
    Finds the longest string which is a contiguous substring of both `text1` and
    `text2`, using dynamic programming over a (len(text1) + 1) x (len(text2) + 1) table.
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
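# Example (illustrative): longest_common_substring("abcdef", "xabded") == "ab"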
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 310
| 0
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solves F = (ℏ * c * π² * A) / (240 * d⁴) for whichever of force, area or
    distance is passed as 0; exactly one of the three arguments must be 0.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 310
| 0
|
def solution():
    """Counts the Sundays that fell on the first of the month from 1901 to 2000."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
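# For reference: over 1 Jan 1901 - 31 Dec 2000 this counts 171 first-of-month Sundays.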
if __name__ == "__main__":
print(solution())
| 187
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
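# Illustrative check (assumption): on torch >= 2.0,
# is_compiled_module(torch.compile(torch.nn.Linear(2, 2))) returns True.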
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Unwrap a model from DDP/DataParallel/DeepSpeed/torch.compile wrappers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    """Block until every process reaches this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save `obj` to `f` on the main process only (XLA-aware)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (keys upper-cased) inside the block."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    """Return a readable name for `obj` (qualname, name, or str fallback)."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return it."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port: int = None) -> bool:
    """Check whether a local TCP port already has a listener."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
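# Usage sketch (illustrative, not from the source):
# with patch_environment(master_port="29501"):
#     assert os.environ["MASTER_PORT"] == "29501"
# port_is_free = not is_port_in_use(29500)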
| 310
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
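# Illustrative usage (assumption): the defaults reproduce the microsoft/biogpt
# architecture, e.g. BioGptConfig().num_hidden_layers == 24.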
| 121
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we feed the model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 310
| 0
|
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
| 217
|
import os
def solution():
    """Sum the name scores in p022_names.txt (Project Euler problem 22)."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
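# With the official p022_names.txt this evaluates to 871198282.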
if __name__ == "__main__":
print(solution())
| 310
| 0
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of f(x)=0 via Newton-Raphson; `func` is an expression in x."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
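# Note: eval() executes the expression string, so `func` must come from a
# trusted source -- hence the `noqa: S307` markers above.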
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(F"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find Square Root of 5
print(F"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(F"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 35
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
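# Illustrative effect (assumption): if a batch is padded to length 512 but its
# longest real sequence has 87 tokens, trim_batch returns [batch_size, 87] tensors.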
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
__snake_case = getLogger(__name__)
def flatten_list(summary_ids):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
return pickle.dump(_lowercase , _lowercase )
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 310
| 0
|
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
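# solution(10000) evaluates to 31626, the published answer to
# Project Euler problem 21.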
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 348
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
except OptionalDependencyNotAvailable:
    pass
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
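# The _LazyModule registered above defers the heavy torch/TF imports until an
# attribute such as ViTMAEModel is first accessed, so bare imports stay cheap.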
| 310
| 0
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_A = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
def __init__( self, UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCAmelCase_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(A_, num_labels=A_, mode=self.mode, **A_ )
use_task_specific_params(self.model, '''summarization''' )
save_git_info(self.hparams.output_dir )
lowerCAmelCase_ = Path(self.output_dir ) / '''metrics.json'''
lowerCAmelCase_ = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams, self.hparams_save_path )
lowerCAmelCase_ = 0
lowerCAmelCase_ = defaultdict(A_ )
lowerCAmelCase_ = self.config.model_type
lowerCAmelCase_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
lowerCAmelCase_ = {
'''data_dir''': self.hparams.data_dir,
'''max_source_length''': self.hparams.max_source_length,
'''prefix''': self.model.config.prefix or '''''',
}
lowerCAmelCase_ = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
lowerCAmelCase_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCAmelCase_ = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCAmelCase_ = get_git_info()['''repo_sha''']
lowerCAmelCase_ = hparams.num_workers
lowerCAmelCase_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, A_ ):
lowerCAmelCase_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCAmelCase_ = self.decoder_start_token_id
lowerCAmelCase_ = (
SeqaSeqDataset if hasattr(self.tokenizer, '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
lowerCAmelCase_ = False
lowerCAmelCase_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCAmelCase_ = self.hparams.eval_max_gen_length
else:
lowerCAmelCase_ = self.model.config.max_length
lowerCAmelCase_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(A_, Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir ) / '''tok_batch.json''' )
lowerCAmelCase_ = True
return readable_batch
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
return self.model(A_, **A_ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.tokenizer.batch_decode(
A_, skip_special_tokens=A_, clean_up_tokenization_spaces=A_ )
return lmap(str.strip, A_ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.tokenizer.pad_token_id
lowerCAmelCase_ , lowerCAmelCase_ = batch['''input_ids'''], batch['''attention_mask''']
lowerCAmelCase_ = batch['''labels''']
if isinstance(self.model, A_ ):
lowerCAmelCase_ = self.model._shift_right(A_ )
else:
lowerCAmelCase_ = shift_tokens_right(A_, A_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowerCAmelCase_ = decoder_input_ids
self.save_readable_batch(A_ )
lowerCAmelCase_ = self(A_, attention_mask=A_, decoder_input_ids=A_, use_cache=A_ )
lowerCAmelCase_ = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowerCAmelCase_ = nn.CrossEntropyLoss(ignore_index=A_ )
assert lm_logits.shape[-1] == self.vocab_size
lowerCAmelCase_ = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1] ), tgt_ids.view(-1 ) )
else:
lowerCAmelCase_ = nn.functional.log_softmax(A_, dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ = label_smoothed_nll_loss(
A_, A_, self.hparams.label_smoothing, ignore_index=A_ )
return (loss,)
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self._step(A_ )
lowerCAmelCase_ = dict(zip(self.loss_names, A_ ) )
# tokens per batch
lowerCAmelCase_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
lowerCAmelCase_ = batch['''input_ids'''].shape[0]
lowerCAmelCase_ = batch['''input_ids'''].eq(self.pad ).sum()
lowerCAmelCase_ = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
return self._generative_step(A_ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__="val" ):
"""simple docstring"""
self.step_count += 1
lowerCAmelCase_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
lowerCAmelCase_ = losses['''loss''']
lowerCAmelCase_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
lowerCAmelCase_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowerCAmelCase_ = torch.tensor(A_ ).type_as(A_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(A_ )
lowerCAmelCase_ = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
lowerCAmelCase_ = self.step_count
self.metrics[prefix].append(A_ ) # callback writes this to self.metrics_save_path
lowerCAmelCase_ = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"{prefix}_loss": loss,
f"{prefix}_{self.val_metric}": metric_tensor,
}
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
return calculate_rouge(A_, A_ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
lowerCAmelCase_ = self.model.generate(
batch['''input_ids'''], attention_mask=batch['''attention_mask'''], use_cache=A_, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, )
lowerCAmelCase_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
lowerCAmelCase_ = self.ids_to_clean_text(A_ )
lowerCAmelCase_ = self.ids_to_clean_text(batch['''labels'''] )
lowerCAmelCase_ = self._step(A_ )
lowerCAmelCase_ = dict(zip(self.loss_names, A_ ) )
lowerCAmelCase_ = self.calc_generative_metrics(A_, A_ )
lowerCAmelCase_ = np.mean(lmap(A_, A_ ) )
base_metrics.update(gen_time=A_, gen_len=A_, preds=A_, target=A_, **A_ )
return base_metrics
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
return self._generative_step(A_ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
return self.validation_epoch_end(A_, prefix='''test''' )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.n_obs[type_path]
lowerCAmelCase_ = self.target_lens[type_path]
lowerCAmelCase_ = self.dataset_class(
self.tokenizer, type_path=A_, n_obs=A_, max_target_length=A_, **self.dataset_kwargs, )
return dataset
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = False ):
"""simple docstring"""
lowerCAmelCase_ = self.get_dataset(A_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowerCAmelCase_ = dataset.make_sortish_sampler(A_, distributed=self.hparams.gpus > 1 )
return DataLoader(
A_, batch_size=A_, collate_fn=dataset.collate_fn, shuffle=A_, num_workers=self.num_workers, sampler=A_, )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowerCAmelCase_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1 )
return DataLoader(
A_, batch_sampler=A_, collate_fn=dataset.collate_fn, num_workers=self.num_workers, )
else:
return DataLoader(
A_, batch_size=A_, collate_fn=dataset.collate_fn, shuffle=A_, num_workers=self.num_workers, sampler=A_, )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_dataloader('''train''', batch_size=self.hparams.train_batch_size, shuffle=A_ )
return dataloader
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self.get_dataloader('''val''', batch_size=self.hparams.eval_batch_size )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self.get_dataloader('''test''', batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None):
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
| 278
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 310
| 0
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check whether a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
| 274
|
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
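# Quick sanity check (illustrative): merge_sort([5, 3, 1, 4]) == [1, 3, 4, 5];
# the sort is stable and runs in O(n log n) time.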
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = input('''Enter numbers separated by a comma:\n''').strip()
__snake_case = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 310
| 0
|
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
@require_cuda
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(A_ ):
_a = Accelerator(cpu=A_ )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = Accelerator()
_a = GradientState()
assert state.num_steps == 1
_a = 4
assert state.num_steps == 4
assert state.sync_gradients is True
_a = False
assert state.sync_gradients is False
GradientState._reset_state()
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = accelerator.prepare(A_ , A_ , A_ , A_ , A_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __UpperCAmelCase ( self ) -> Tuple:
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
accelerator.prepare(A_ , A_ , A_ , A_ , A_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __UpperCAmelCase ( self ) -> int:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__magic_name__ , **__magic_name__ ):
pass
with patch('torch.cuda.set_device' , A_ ), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64' ):
_a = Accelerator()
self.assertEqual(str(accelerator.state.device ) , 'cuda:64' )
def __UpperCAmelCase ( self ) -> List[Any]:
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
accelerator.prepare(A_ , A_ , A_ , A_ , A_ )
_a = get_signature(A_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(A_ )
# make sure random weights don't match
load_random_weights(A_ )
self.assertTrue(abs(model_signature - get_signature(A_ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(A_ )
self.assertTrue(abs(model_signature - get_signature(A_ ) ) < 1e-3 )
def __UpperCAmelCase ( self ) -> Tuple:
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
accelerator.prepare(A_ , A_ , A_ , A_ , A_ )
_a = get_signature(A_ )
# saving hook
def save_config(__magic_name__ , __magic_name__ , __magic_name__ ):
_a = {'class_name': models[0].__class__.__name__}
with open(os.path.join(A_ , 'data.json' ) , 'w' ) as f:
json.dump(A_ , A_ )
# loading hook
def load_config(__magic_name__ , __magic_name__ ):
with open(os.path.join(A_ , 'data.json' ) , 'r' ) as f:
_a = json.load(A_ )
_a = config['class_name']
_a = accelerator.register_save_state_pre_hook(A_ )
_a = accelerator.register_load_state_pre_hook(A_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(A_ )
# make sure random weights don't match with hooks
load_random_weights(A_ )
self.assertTrue(abs(model_signature - get_signature(A_ ) ) > 1e-3 )
# random class name to verify correct one is loaded
_a = 'random'
# make sure loaded weights match with hooks
accelerator.load_state(A_ )
self.assertTrue(abs(model_signature - get_signature(A_ ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(A_ )
# make sure random weights don't match with hooks removed
load_random_weights(A_ )
self.assertTrue(abs(model_signature - get_signature(A_ ) ) > 1e-3 )
# random class name to verify correct one is loaded
_a = 'random'
# make sure loaded weights match with hooks removed
accelerator.load_state(A_ )
self.assertTrue(abs(model_signature - get_signature(A_ ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
_a = None
# This should work
_a , _a , _a , _a , _a , _a = accelerator.prepare(
A_ , A_ , A_ , A_ , A_ , A_ )
self.assertTrue(dummy_obj is None )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
_a = [1, 2, 3]
# This should work
_a , _a , _a , _a , _a , _a = accelerator.prepare(
A_ , A_ , A_ , A_ , A_ , A_ )
self.assertEqual(
getattr(A_ , '_is_accelerate_prepared' , A_ ) , A_ , 'Dummy object should have `_is_accelerate_prepared` set to `True`' , )
self.assertEqual(
getattr(A_ , '_is_accelerate_prepared' , A_ ) , A_ , 'Model is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(A_ , '_is_accelerate_prepared' , A_ ) , A_ , 'Optimizer is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(A_ , '_is_accelerate_prepared' , A_ ) , A_ , 'Scheduler is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(A_ , '_is_accelerate_prepared' , A_ ) , A_ , 'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(A_ , '_is_accelerate_prepared' , A_ ) , A_ , 'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
@slow
@require_bnb
def __UpperCAmelCase ( self ) -> Optional[int]:
from transformers import AutoModelForCausalLM
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=A_ , device_map={'': 0} , )
_a = Accelerator()
# This should work
_a = accelerator.prepare(A_ )
@slow
@require_bnb
def __UpperCAmelCase ( self ) -> Dict:
from transformers import AutoModelForCausalLM
_a = Accelerator()
with init_empty_weights():
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
_a = infer_auto_device_map(A_ )
_a = 'cpu'
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , device_map=A_ , load_in_abit=A_ , llm_inta_enable_fpaa_cpu_offload=A_ )
# This should not work and get value error
with self.assertRaises(A_ ):
_a = accelerator.prepare(A_ )
@slow
@require_bnb
@require_multi_gpu
def __UpperCAmelCase ( self ) -> Dict:
from transformers import AutoModelForCausalLM
_a = {'distributed_type': DistributedType.MULTI_GPU}
with init_empty_weights():
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
_a = infer_auto_device_map(A_ )
_a = 1
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=A_ , device_map=A_ , )
_a = Accelerator()
# This should not work and get value error
with self.assertRaises(A_ ):
_a = accelerator.prepare(A_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __UpperCAmelCase ( self ) -> Tuple:
from transformers import AutoModelForCausalLM
with init_empty_weights():
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
_a = infer_auto_device_map(A_ )
_a = 1
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=A_ , device_map=A_ , )
_a = Accelerator()
# This should work
_a = accelerator.prepare(A_ )
@require_cuda
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = torch.nn.Linear(10 , 10 )
_a = torch.optim.SGD(model.parameters() , lr=0.0_1 )
_a = Accelerator(cpu=A_ )
_a = accelerator.prepare(A_ )
| 168
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
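        # `to_kwargs` diffs an instance against the dataclass defaults, which is
        # why only explicitly overridden fields appear in the dicts above.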
    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 310
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f'''Model name {model_name} not supported''')
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f'''jozhang97/{model_name}''')
        processor.push_to_hub(f'''jozhang97/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
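# Example invocation (a sketch; the script filename and dump path below are assumptions,
# while the checkpoints themselves are fetched from the Hub):
# python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#     --pytorch_dump_folder_path ./deta-swin-large --push_to_hub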
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
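# Usage sketch (pairs the processor with a Hub checkpoint; `image` is a PIL image you supply):
# from transformers import OwlViTProcessor
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")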
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/swinv2-tiny-patch4-window8-256''': (
        '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = """swinv2"""

    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
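# Usage sketch for the config above: with the default depths (2, 2, 6, 2) and
# embed_dim=96, the derived channel dimension is 96 * 2**3 == 768.
# config = Swinv2Config(image_size=256, window_size=8)
# print(config.hidden_size)  # 768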
import math
def proth(number: int) -> int:
    """
    Returns the number-th Proth number (3, 5, 9, 13, 17, 25, ...).

    >>> proth(6)
    25
    """
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers have the form k * 2**n + 1; build the list block by block,
        # doubling the number of entries appended at each block.
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
    return proth_list[number - 1]
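def is_proth(candidate: int) -> bool:
    """Cross-check sketch (not part of the original module): a Proth number has the
    form k * 2**n + 1 with k odd, n a positive integer and k < 2**n."""
    if candidate < 3:
        return False
    k, n = candidate - 1, 0
    while k % 2 == 0:
        k //= 2
        n += 1
    return n > 0 and k < 2**n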
if __name__ == "__main__":
import doctest
doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"""ValueError: there is no {number}th Proth number""")
            continue
        print(f"""The {number}th Proth number: {value}""")
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypts the text with pseudo-random keys; returns (cipher, key)."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recovers the plaintext: each cipher value is (p + k) * k, so p = (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
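# Round-trip sanity check (hypothetical usage; each key entry is consumed exactly once):
# cipher, key = Onepad.encrypt("one-time pad")
# assert Onepad.decrypt(cipher, key) == "one-time pad"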
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Scores each support token as a potential entity start/end for the query."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
def solution(n: int = 4000000) -> int:
    """Returns the sum of all even Fibonacci numbers not exceeding n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
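def solution_by_recurrence(n: int = 4000000) -> int:
    """Alternative sketch: even Fibonacci numbers (2, 8, 34, ...) satisfy
    E(k) = 4 * E(k - 1) + E(k - 2), so the sum can be accumulated directly."""
    total, current, following = 0, 2, 8
    while current <= n:
        total += current
        current, following = following, 4 * following + current
    return total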
if __name__ == "__main__":
print(f'''{solution() = }''')
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param('''concept_embeds''', jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            '''special_care_embeds''', jax.nn.initializers.ones, (3, self.config.projection_dim))

        self.concept_embeds_weights = self.param('''concept_embeds_weights''', jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param('''special_care_embeds_weights''', jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = '''clip_input'''
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}

        random_params = self.module.init(rngs, clip_input)['''params''']

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {'''params''': params or self.params}, jnp.array(clip_input, dtype=jnp.float32), rngs={}, )
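# Instantiation sketch (weights here are the `ones` initializers above; real pipelines
# load trained params from a checkpoint before calling the checker):
# safety_checker = FlaxStableDiffusionSafetyChecker(CLIPConfig())
# has_nsfw = safety_checker(images_nchw, params=safety_checker.params)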
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = """rag"""
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('question_encoder')
        question_encoder_model_type = question_encoder_config.pop('model_type')
        decoder_config = kwargs.pop('generator')
        decoder_model_type = decoder_config.pop('model_type')

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, 'forced_eos_token_id', None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['question_encoder'] = self.question_encoder.to_dict()
        output['generator'] = self.generator.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
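# Construction sketch (assumes a DPR question encoder paired with a BART generator,
# the usual RAG setup):
# from transformers import BartConfig, DPRConfig
# rag_config = RagConfig.from_question_encoder_generator_configs(DPRConfig(), BartConfig(), n_docs=5)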
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = """M-CLIP"""

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, masking out padding positions
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
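# Usage sketch (the tokenizer pairing is an assumption; M-CLIP checkpoints ship with an
# XLM-R tokenizer):
# from transformers import AutoTokenizer
# tok = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
# batch = tok(["a photo of a dog"], return_tensors="pt", padding=True)
# text_embs, token_embs = model(batch["input_ids"], batch["attention_mask"])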
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="""x""")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("""output_data/output.jpg""", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("""Output-Image""", self.img)
        cv2.imshow("""Input-Image""", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
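# Equivalent vectorised mapping sketch (same cumulative-histogram idea as `stretch`,
# expressed as a numpy lookup table applied to a grayscale image `img`):
# hist, _ = np.histogram(img.ravel(), 256, [0, 256])
# lut = np.round((256 - 1) * hist.cumsum() / hist.sum()).astype(np.uint8)
# equalized = lut[img]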
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = """gelu"""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained('facebook/xglm-564M')

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')

        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')

        tokenizer.padding_side = 'left'

        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]

        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 / sqrt(2 ) )-> IIRFilter:
'''simple docstring'''
UpperCAmelCase : Any =tau * frequency / samplerate
UpperCAmelCase : Union[str, Any] =sin(_lowercase )
UpperCAmelCase : Tuple =cos(_lowercase )
UpperCAmelCase : List[str] =_sin / (2 * q_factor)
UpperCAmelCase : Tuple =(1 - _cos) / 2
UpperCAmelCase : int =1 - _cos
UpperCAmelCase : Any =1 + alpha
UpperCAmelCase : Dict =-2 * _cos
UpperCAmelCase : List[str] =1 - alpha
UpperCAmelCase : Any =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 / sqrt(2 ) )-> IIRFilter:
'''simple docstring'''
UpperCAmelCase : Dict =tau * frequency / samplerate
UpperCAmelCase : Dict =sin(_lowercase )
UpperCAmelCase : Optional[int] =cos(_lowercase )
UpperCAmelCase : Optional[int] =_sin / (2 * q_factor)
UpperCAmelCase : List[Any] =(1 + _cos) / 2
UpperCAmelCase : List[str] =-1 - _cos
UpperCAmelCase : List[Any] =1 + alpha
UpperCAmelCase : Tuple =-2 * _cos
UpperCAmelCase : Optional[int] =1 - alpha
UpperCAmelCase : int =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order band-pass filter (constant skirt gain, peak gain = Q)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a 2nd-order all-pass filter (flat magnitude, frequency-dependent phase)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peaking-EQ filter that boosts or cuts by `gain_db` around `frequency`."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter that boosts or cuts everything below `frequency`."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter that boosts or cuts everything above `frequency`."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
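# Usage sketch for the biquad factories above. It assumes the module-level imports
# `from math import cos, sin, sqrt, tau` and `from audio_filters.iir_filter import
# IIRFilter` that precede this excerpt, and the usual `IIRFilter.process(sample)`
# sample-by-sample API; treat it as illustrative rather than part of the module.
if __name__ == "__main__":
    from math import pi

    band = make_bandpass(frequency=440, samplerate=48_000)
    tone = [sin(2 * pi * 440 * n / 48_000) for n in range(480)]
    filtered = [band.process(sample) for sample in tone]
    print(f"peak amplitude after band-pass: {max(map(abs, filtered)):.3f}")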
| 348
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int,A_: int ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(A_ )
def snake_case_ ( self: Dict,A_: int ):
'''simple docstring'''
__UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = F'''facebook/wmt19-{pair}'''
__UpperCamelCase = self.get_tokenizer(A_ )
__UpperCamelCase = self.get_model(A_ )
__UpperCamelCase = bleu_data[pair]['src']
__UpperCamelCase = bleu_data[pair]['tgt']
__UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ )
__UpperCamelCase = model.generate(
input_ids=batch.input_ids,num_beams=8,)
__UpperCamelCase = tokenizer.batch_decode(
A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ )
__UpperCamelCase = calculate_bleu(A_,A_ )
print(A_ )
self.assertGreaterEqual(scores['bleu'],A_ )
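# For context: a minimal stand-in for the `calculate_bleu` helper imported above,
# assuming it wraps sacrebleu's corpus BLEU the way the seq2seq example utils do.
# This sketch is illustrative only and not the repository's actual implementation.
#
# def calculate_bleu(output_lns, refs_lns):
#     import sacrebleu
#     return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}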
| 310
| 0
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """
        Scores each query token as a possible entity start/end, using the support set.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 278
|
def hexagonal_numbers(length: int) -> list[int]:
    """Returns the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
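# Quick sanity check of the closed form h(n) = n * (2n - 1): the gaps between
# consecutive hexagonal numbers grow by a constant 4 (1, 5, 9, 13, ...). This
# helper is illustrative and not part of the original module.
def _check_hexagonal() -> None:
    values = hexagonal_numbers(length=6)
    assert values == [0, 1, 6, 15, 28, 45]
    diffs = [b - a for a, b in zip(values, values[1:])]
    assert all(d2 - d1 == 4 for d1, d2 in zip(diffs, diffs[1:]))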
| 310
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : Optional[Any] = logging.get_logger(__name__)
A : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
A : List[str] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
A : Tuple = {
'''camembert-base''': 5_1_2,
}
A : List[Any] = '''▁'''
class A (_a ):
'''simple docstring'''
__lowerCamelCase : List[str] = VOCAB_FILES_NAMES
__lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : List[str]="</s>" , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : str="<unk>" , __lowerCAmelCase : Dict="<pad>" , __lowerCAmelCase : Union[str, Any]="<mask>" , __lowerCAmelCase : int=["<s>NOTUSED", "</s>NOTUSED"] , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
A__ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
A__ = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
A__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
A__ = len(self.fairseq_tokens_to_ids )
A__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
A__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def a_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> Union[str, Any]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ = [self.cls_token_id]
A__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self : Optional[int] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> Optional[Any]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def a_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a_ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def a_ ( self : Any ) -> Dict:
"""simple docstring"""
A__ = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a_ ( self : Dict , __lowerCAmelCase : str ) -> str:
"""simple docstring"""
return self.sp_model.encode(A_ , out_type=A_ )
def a_ ( self : str , __lowerCAmelCase : Tuple ) -> str:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(A_ ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(A_ )
def a_ ( self : str , __lowerCAmelCase : Any ) -> int:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a_ ( self : Optional[int] , __lowerCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
A__ = []
A__ = """"""
A__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A_ ) + token
A__ = True
A__ = []
else:
current_sub_tokens.append(A_ )
A__ = False
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def __getstate__( self : int ) -> List[Any]:
"""simple docstring"""
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__( self : str , __lowerCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
A__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A__ = os.path.join(
A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , """wb""" ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
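# A small self-contained check of the pair layout produced by the first `a_` method
# above (`build_inputs_with_special_tokens` in the real tokenizer): RoBERTa-style
# `<s> A </s></s> B </s>`. The token ids below are hypothetical so that no
# sentencepiece model file is needed.
def _pair_layout(ids_a, ids_b, cls_id=5, sep_id=6):
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]


assert _pair_layout([10, 11], [12]) == [5, 10, 11, 6, 6, 12, 6]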
| 274
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = MgpstrTokenizer
_lowercase = False
_lowercase = {}
_lowercase = False
def snake_case_ ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def snake_case_ ( self: Dict,**A_: Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'tester'
__UpperCamelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ )
self.assertEqual(len(A_ ),1 )
__UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ )
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ),0 )
__UpperCamelCase = tokenizer.decode(A_ )
self.assertIsInstance(A_,A_ )
self.assertEqual(text_a.replace(' ','' ),A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
| 310
| 0
|
import math


def proth(number: int) -> int:
    """Returns the `number`-th Proth number (1-indexed): 3, 5, 9, 13, 17, 25, ..."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of doubling "blocks" needed to reach the requested index.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
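# Definition-level sanity check: a Proth number has the form k * 2**n + 1 with k odd
# and k < 2**n. The helper below is illustrative and not part of the original module.
def _is_proth(candidate: int) -> bool:
    remainder, power = candidate - 1, 1
    while remainder % 2 == 0:
        remainder //= 2
        power *= 2
    return remainder < power  # the odd factor must be strictly below 2**n


assert [proth(i) for i in range(1, 8)] == [3, 5, 9, 13, 17, 25, 33]
assert all(_is_proth(proth(i)) for i in range(1, 20))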
| 168
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Union[str, Any],A_: List[str] ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = RobertaEmbeddings(A_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Any,A_: int ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = config.num_labels
__UpperCamelCase = config.num_hidden_layers
__UpperCamelCase = DeeRobertaModel(A_ )
__UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase = nn.Linear(config.hidden_size,self.config.num_labels )
@add_start_docstrings_to_model_forward(A_ )
def snake_case_ ( self: List[str],A_: int=None,A_: List[Any]=None,A_: List[str]=None,A_: List[str]=None,A_: Optional[int]=None,A_: List[str]=None,A_: Any=None,A_: List[Any]=-1,A_: List[Any]=False,):
'''simple docstring'''
__UpperCamelCase = self.num_layers
try:
__UpperCamelCase = self.roberta(
A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,)
__UpperCamelCase = outputs[1]
__UpperCamelCase = self.dropout(A_ )
__UpperCamelCase = self.classifier(A_ )
__UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase = e.message
__UpperCamelCase = e.exit_layer
__UpperCamelCase = outputs[0]
if not self.training:
__UpperCamelCase = entropy(A_ )
__UpperCamelCase = []
__UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
# work with highway exits
__UpperCamelCase = []
for highway_exit in outputs[-1]:
__UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
__UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase = (loss,) + outputs
if not self.training:
__UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 310
| 0
|
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process (preemptive shortest-job-first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes

    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turn-around time is the sum of a process's burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(
    waiting_time: list[int], turn_around_time: list[int], no_of_processes: int
) -> None:
    """Print the average waiting time and average turn-around time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
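# A non-interactive worked example for the preemptive SJF routines above; the
# four-process workload is made up for the demo. With arrivals [0, 1, 2, 3] and
# bursts [8, 4, 9, 5], P2 preempts P1 at t=1 and the waits come out [9, 0, 15, 2].
def _sjf_demo() -> None:
    arrival = [0, 1, 2, 3]
    burst = [8, 4, 9, 5]
    wait = calculate_waitingtime(arrival, burst, 4)
    assert wait == [9, 0, 15, 2]
    assert calculate_turnaroundtime(burst, 4, wait) == [17, 4, 24, 7]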
| 97
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def snake_case_ ( *A_: Optional[Any],**A_: Tuple ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase (unittest.TestCase ):
_lowercase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
import datasets
__UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )
__UpperCamelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__UpperCamelCase = object_detector(A_,threshold=0.0 )
self.assertEqual(len(A_ ),len(A_ ) )
for outputs in batch_outputs:
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
],threshold=0.0,)
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
@require_torch
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 0.9_9_8_5
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd'
__UpperCamelCase = 0.9_9_9_3
__UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ )
__UpperCamelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
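# Outside the test harness, the same checks reduce to a short script; the model id
# and image URL mirror the ones exercised above (model download and network access
# required, so this stays as a commented sketch):
#
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# for det in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
#     print(f"{det['label']:>8} {det['score']:.4f} {det['box']}")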
| 310
| 0
|
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE :str = 256
class A_ ( _a ):
_lowerCamelCase : List[str] = ["""melgan"""]
def __init__( self : List[str] , snake_case_ : SpectrogramNotesEncoder , snake_case_ : SpectrogramContEncoder , snake_case_ : TaFilmDecoder , snake_case_ : DDPMScheduler , snake_case_ : OnnxRuntimeModel if is_onnx_available() else Any , ):
super().__init__()
# From MELGAN
_UpperCAmelCase = math.log(1e-5 ) # Matches MelGAN training.
_UpperCAmelCase = 4.0 # Largest value for most examples
_UpperCAmelCase = 1_2_8
self.register_modules(
notes_encoder=A_ , continuous_encoder=A_ , decoder=A_ , scheduler=A_ , melgan=A_ , )
def lowercase ( self : Optional[Any] , snake_case_ : Tuple , snake_case_ : Optional[Any]=(-1.0, 1.0) , snake_case_ : Dict=False ):
_UpperCAmelCase , _UpperCAmelCase = output_range
if clip:
_UpperCAmelCase = torch.clip(A_ , self.min_value , self.max_value )
# Scale to [0, 1].
_UpperCAmelCase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def lowercase ( self : List[Any] , snake_case_ : List[Any] , snake_case_ : Optional[Any]=(-1.0, 1.0) , snake_case_ : Dict=False ):
_UpperCAmelCase , _UpperCAmelCase = input_range
_UpperCAmelCase = torch.clip(A_ , A_ , A_ ) if clip else outputs
# Scale to [0, 1].
_UpperCAmelCase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def lowercase ( self : Union[str, Any] , snake_case_ : int , snake_case_ : Dict , snake_case_ : List[str] ):
_UpperCAmelCase = input_tokens > 0
_UpperCAmelCase , _UpperCAmelCase = self.notes_encoder(
encoder_input_tokens=A_ , encoder_inputs_mask=A_ )
_UpperCAmelCase , _UpperCAmelCase = self.continuous_encoder(
encoder_inputs=A_ , encoder_inputs_mask=A_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def lowercase ( self : Optional[int] , snake_case_ : str , snake_case_ : int , snake_case_ : Any ):
_UpperCAmelCase = noise_time
if not torch.is_tensor(A_ ):
_UpperCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
_UpperCAmelCase = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCAmelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
_UpperCAmelCase = self.decoder(
encodings_and_masks=A_ , decoder_input_tokens=A_ , decoder_noise_time=A_ )
return logits
@torch.no_grad()
def __call__( self : Tuple , snake_case_ : List[List[int]] , snake_case_ : Optional[torch.Generator] = None , snake_case_ : int = 1_0_0 , snake_case_ : bool = True , snake_case_ : str = "numpy" , snake_case_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case_ : int = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(A_ )}.' )
_UpperCAmelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
_UpperCAmelCase = np.zeros([1, 0, self.n_dims] , np.floataa )
_UpperCAmelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A_ , device=self.device )
for i, encoder_input_tokens in enumerate(A_ ):
if i == 0:
_UpperCAmelCase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_UpperCAmelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A_ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_UpperCAmelCase = ones
_UpperCAmelCase = self.scale_features(
A_ , output_range=[-1.0, 1.0] , clip=A_ )
_UpperCAmelCase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A_ , continuous_mask=A_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_UpperCAmelCase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_UpperCAmelCase = self.decode(
encodings_and_masks=A_ , input_tokens=A_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
_UpperCAmelCase = self.scale_to_features(A_ , input_range=[-1.0, 1.0] )
_UpperCAmelCase = mel[:1]
_UpperCAmelCase = mel.cpu().float().numpy()
_UpperCAmelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ )
logger.info("Generated segment" , A_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'." )
if output_type == "numpy":
_UpperCAmelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
_UpperCAmelCase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A_ )
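# The two scaling helpers above are plain min-max maps between the MelGAN log-mel
# range [log(1e-5), 4.0] set in __init__ and a target interval. This standalone
# restatement (an illustration, not part of the pipeline) shows they are inverses:
import math as _math

_MIN, _MAX = _math.log(1e-5), 4.0

def _scale(x, lo, hi):
    return (x - _MIN) / (_MAX - _MIN) * (hi - lo) + lo

def _unscale(y, lo, hi):
    return (y - lo) / (hi - lo) * (_MAX - _MIN) + _MIN

assert abs(_unscale(_scale(1.25, -1.0, 1.0), -1.0, 1.0) - 1.25) < 1e-6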
| 22
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __lowerCamelCase (_a ):
_lowercase = """xlm-roberta"""
def __init__( self: Union[str, Any],A_: Union[str, Any]=3_0522,A_: Dict=768,A_: Union[str, Any]=12,A_: Any=12,A_: str=3072,A_: Union[str, Any]="gelu",A_: str=0.1,A_: Optional[int]=0.1,A_: List[Any]=512,A_: Optional[Any]=2,A_: Dict=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=1,A_: str=0,A_: str=2,A_: Optional[Any]="absolute",A_: Union[str, Any]=True,A_: int=None,**A_: Optional[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
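# Illustration of what the ONNX config above yields for the default task, assuming
# the public transformers names `XLMRobertaConfig` / `XLMRobertaOnnxConfig` for the
# two classes (kept as a commented sketch since it needs the full library):
#
# onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig())
# print(onnx_config.inputs)
# # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
# #              ('attention_mask', {0: 'batch', 1: 'sequence'})])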
| 310
| 0
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _SCREAMING_SNAKE_CASE ( _a ):
'''simple docstring'''
lowercase_ = 42
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__(self : List[str] , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Tuple=("DownEncoderBlock2D",) , UpperCAmelCase_ : str=(64,) , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple="silu" , UpperCAmelCase_ : Any=True , ) ->Optional[int]:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Union[str, Any] =layers_per_block
lowerCamelCase__: str =torch.nn.Convad(
A_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowerCamelCase__: str =None
lowerCamelCase__: Union[str, Any] =nn.ModuleList([])
# down
lowerCamelCase__: Dict =block_out_channels[0]
for i, down_block_type in enumerate(A_):
lowerCamelCase__: Any =output_channel
lowerCamelCase__: List[str] =block_out_channels[i]
lowerCamelCase__: Dict =i == len(A_) - 1
lowerCamelCase__: Optional[Any] =get_down_block(
A_ , num_layers=self.layers_per_block , in_channels=A_ , out_channels=A_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=A_ , resnet_groups=A_ , attention_head_dim=A_ , temb_channels=A_ , )
self.down_blocks.append(A_)
# mid
lowerCamelCase__: Any =UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=A_ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=A_ , temb_channels=A_ , )
# out
lowerCamelCase__: str =nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=A_ , eps=1E-6)
lowerCamelCase__: List[str] =nn.SiLU()
lowerCamelCase__: Union[str, Any] =2 * out_channels if double_z else out_channels
lowerCamelCase__: Union[str, Any] =nn.Convad(block_out_channels[-1] , A_ , 3 , padding=1)
lowerCamelCase__: Union[str, Any] =False
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =x
lowerCamelCase__: Tuple =self.conv_in(A_)
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase_ : List[Any]):
def custom_forward(*UpperCAmelCase_ : Optional[Any]):
return module(*A_)
return custom_forward
# down
if is_torch_version(">=" , "1.11.0"):
for down_block in self.down_blocks:
lowerCamelCase__: List[str] =torch.utils.checkpoint.checkpoint(
create_custom_forward(A_) , A_ , use_reentrant=A_)
# middle
lowerCamelCase__: Dict =torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , A_ , use_reentrant=A_)
else:
for down_block in self.down_blocks:
lowerCamelCase__: Tuple =torch.utils.checkpoint.checkpoint(create_custom_forward(A_) , A_)
# middle
lowerCamelCase__: Optional[Any] =torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block) , A_)
else:
# down
for down_block in self.down_blocks:
lowerCamelCase__: Tuple =down_block(A_)
# middle
lowerCamelCase__: List[str] =self.mid_block(A_)
# post-process
lowerCamelCase__: List[str] =self.conv_norm_out(A_)
lowerCamelCase__: Tuple =self.conv_act(A_)
lowerCamelCase__: Tuple =self.conv_out(A_)
return sample
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__(self : str , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : str=("UpDecoderBlock2D",) , UpperCAmelCase_ : Optional[Any]=(64,) , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : Dict="silu" , UpperCAmelCase_ : Dict="group" , ) ->Optional[int]:
'''simple docstring'''
super().__init__()
lowerCamelCase__: List[str] =layers_per_block
lowerCamelCase__: Optional[Any] =nn.Convad(
A_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowerCamelCase__: List[str] =None
lowerCamelCase__: int =nn.ModuleList([])
lowerCamelCase__: Any =in_channels if norm_type == "spatial" else None
# mid
lowerCamelCase__: List[Any] =UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=A_ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=A_ , temb_channels=A_ , )
# up
lowerCamelCase__: Union[str, Any] =list(reversed(A_))
lowerCamelCase__: Dict =reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_):
lowerCamelCase__: List[str] =output_channel
lowerCamelCase__: Union[str, Any] =reversed_block_out_channels[i]
lowerCamelCase__: Union[str, Any] =i == len(A_) - 1
lowerCamelCase__: Any =get_up_block(
A_ , num_layers=self.layers_per_block + 1 , in_channels=A_ , out_channels=A_ , prev_output_channel=A_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=A_ , resnet_groups=A_ , attention_head_dim=A_ , temb_channels=A_ , resnet_time_scale_shift=A_ , )
self.up_blocks.append(A_)
lowerCamelCase__: int =output_channel
# out
if norm_type == "spatial":
lowerCamelCase__: List[str] =SpatialNorm(block_out_channels[0] , A_)
else:
lowerCamelCase__: Optional[Any] =nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=A_ , eps=1E-6)
lowerCamelCase__: List[Any] =nn.SiLU()
lowerCamelCase__: Dict =nn.Convad(block_out_channels[0] , A_ , 3 , padding=1)
lowerCamelCase__: Dict =False
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]=None) ->Tuple:
'''simple docstring'''
lowerCamelCase__: List[str] =z
lowerCamelCase__: str =self.conv_in(A_)
lowerCamelCase__: Optional[int] =next(iter(self.up_blocks.parameters())).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase_ : Tuple):
def custom_forward(*UpperCAmelCase_ : Dict):
return module(*A_)
return custom_forward
if is_torch_version(">=" , "1.11.0"):
# middle
lowerCamelCase__: List[Any] =torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , A_ , A_ , use_reentrant=A_)
lowerCamelCase__: Optional[Any] =sample.to(A_)
# up
for up_block in self.up_blocks:
lowerCamelCase__: List[str] =torch.utils.checkpoint.checkpoint(
create_custom_forward(A_) , A_ , A_ , use_reentrant=A_)
else:
# middle
lowerCamelCase__: List[str] =torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , A_ , A_)
lowerCamelCase__: Optional[int] =sample.to(A_)
# up
for up_block in self.up_blocks:
lowerCamelCase__: int =torch.utils.checkpoint.checkpoint(create_custom_forward(A_) , A_ , A_)
else:
# middle
lowerCamelCase__: Optional[int] =self.mid_block(A_ , A_)
lowerCamelCase__: Optional[Any] =sample.to(A_)
# up
for up_block in self.up_blocks:
lowerCamelCase__: Union[str, Any] =up_block(A_ , A_)
# post-process
if latent_embeds is None:
lowerCamelCase__: List[str] =self.conv_norm_out(A_)
else:
lowerCamelCase__: List[str] =self.conv_norm_out(A_ , A_)
lowerCamelCase__: Dict =self.conv_act(A_)
lowerCamelCase__: Any =self.conv_out(A_)
return sample
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__(self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any="random" , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Optional[Any]=True) ->Optional[Any]:
'''simple docstring'''
super().__init__()
lowerCamelCase__: int =n_e
lowerCamelCase__: Dict =vq_embed_dim
lowerCamelCase__: int =beta
lowerCamelCase__: Dict =legacy
lowerCamelCase__: List[str] =nn.Embedding(self.n_e , self.vq_embed_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e)
lowerCamelCase__: Tuple =remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap)))
lowerCamelCase__: List[Any] =self.used.shape[0]
lowerCamelCase__: int =unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowerCamelCase__: Any =self.re_embed
lowerCamelCase__: str =self.re_embed + 1
print(
F"""Remapping {self.n_e} indices to {self.re_embed} indices. """
F"""Using {self.unknown_index} for unknown indices.""")
else:
lowerCamelCase__: List[str] =n_e
lowerCamelCase__: Optional[Any] =sane_index_shape
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : str) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =inds.shape
assert len(A_) > 1
lowerCamelCase__: str =inds.reshape(ishape[0] , -1)
lowerCamelCase__: Optional[int] =self.used.to(A_)
lowerCamelCase__: Optional[Any] =(inds[:, :, None] == used[None, None, ...]).long()
lowerCamelCase__: Dict =match.argmax(-1)
lowerCamelCase__: Dict =match.sum(2) < 1
if self.unknown_index == "random":
lowerCamelCase__: str =torch.randint(0 , self.re_embed , size=new[unknown].shape).to(device=new.device)
else:
lowerCamelCase__: Optional[int] =self.unknown_index
return new.reshape(A_)
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Union[str, Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: List[str] =inds.shape
assert len(A_) > 1
lowerCamelCase__: Union[str, Any] =inds.reshape(ishape[0] , -1)
lowerCamelCase__: List[Any] =self.used.to(A_)
if self.re_embed > self.used.shape[0]: # extra token
lowerCamelCase__: Any =0 # simply set to zero
lowerCamelCase__: str =torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , A_)
return back.reshape(A_)
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Tuple) ->Dict:
'''simple docstring'''
lowerCamelCase__: Dict =z.permute(0 , 2 , 3 , 1).contiguous()
lowerCamelCase__: Dict =z.view(-1 , self.vq_embed_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowerCamelCase__: Union[str, Any] =torch.argmin(torch.cdist(A_ , self.embedding.weight) , dim=1)
lowerCamelCase__: Tuple =self.embedding(A_).view(z.shape)
lowerCamelCase__: Dict =None
lowerCamelCase__: int =None
# compute loss for embedding
if not self.legacy:
lowerCamelCase__: Any =self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
else:
lowerCamelCase__: Any =torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
lowerCamelCase__: Optional[int] =z + (z_q - z).detach()
# reshape back to match original input shape
lowerCamelCase__: str =z_q.permute(0 , 3 , 1 , 2).contiguous()
if self.remap is not None:
lowerCamelCase__: Union[str, Any] =min_encoding_indices.reshape(z.shape[0] , -1) # add batch axis
lowerCamelCase__: Union[str, Any] =self.remap_to_used(A_)
lowerCamelCase__: List[str] =min_encoding_indices.reshape(-1 , 1) # flatten
if self.sane_index_shape:
lowerCamelCase__: List[str] =min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]) ->Tuple:
'''simple docstring'''
if self.remap is not None:
lowerCamelCase__: Dict =indices.reshape(shape[0] , -1) # add batch axis
lowerCamelCase__: List[Any] =self.unmap_to_all(A_)
lowerCamelCase__: Dict =indices.reshape(-1) # flatten again
# get quantized latent vectors
lowerCamelCase__: Optional[int] =self.embedding(A_)
if shape is not None:
lowerCamelCase__: int =z_q.view(A_)
# reshape back to match original input shape
lowerCamelCase__: Dict =z_q.permute(0 , 3 , 1 , 2).contiguous()
return z_q
class _SCREAMING_SNAKE_CASE ( _a ):
'''simple docstring'''
def __init__(self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any=False) ->Any:
'''simple docstring'''
lowerCamelCase__: Dict =parameters
lowerCamelCase__ , lowerCamelCase__: List[Any] =torch.chunk(A_ , 2 , dim=1)
lowerCamelCase__: Optional[Any] =torch.clamp(self.logvar , -30.0 , 20.0)
lowerCamelCase__: Optional[int] =deterministic
lowerCamelCase__: Dict =torch.exp(0.5 * self.logvar)
lowerCamelCase__: Dict =torch.exp(self.logvar)
if self.deterministic:
lowerCamelCase__: Optional[Any] =torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Optional[torch.Generator] = None) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[Any] =randn_tensor(
self.mean.shape , generator=A_ , device=self.parameters.device , dtype=self.parameters.dtype)
lowerCamelCase__: Any =self.mean + self.std * sample
return x
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Optional[int]=None) ->Any:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2) + self.var - 1.0 - self.logvar , dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=[1, 2, 3]) ->Union[str, Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0])
lowerCamelCase__: Optional[int] =np.log(2.0 * np.pi)
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2) / self.var , dim=A_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->List[str]:
'''simple docstring'''
return self.mean
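# A tiny standalone illustration of the straight-through estimator used by the
# vector quantizer above (`z + (z_q - z).detach()`): forward values equal z_q while
# gradients flow to z as if quantization were the identity. Illustrative only.
def _straight_through_demo() -> None:
    z = torch.tensor([0.3, 0.7], requires_grad=True)
    z_q = torch.tensor([0.0, 1.0])  # stand-in nearest codebook entries
    st = z + (z_q - z).detach()
    st.sum().backward()
    assert torch.allclose(st.detach(), z_q)
    assert torch.equal(z.grad, torch.ones(2))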
| 10
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase (_a ):
_lowercase = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
    def to_dict(self):
        """Serializes this instance, converting any nested `GenerationConfig` to a plain dict."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 310
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 187
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP/DataParallel/DeepSpeed/compiled)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk; only the TPU host or the local main process actually writes."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """A context manager that temporarily sets environment variables and removes them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a human-readable name for an object, class, or function."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`, creating nested dicts as needed."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a local port is already in use (defaults to 29500, torch distributed's default)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
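# Minimal usage sketch (added for illustration, not part of the original module):
# `patch_environment` scopes environment variables, `merge_dicts` deep-merges
# nested dicts. Values below are arbitrary examples.
if __name__ == "__main__":
    with patch_environment(master_addr="127.0.0.1", master_port=29501):
        assert os.environ["MASTER_PORT"] == "29501"
    # removed on exit (note: pre-existing values are not restored)
    assert "MASTER_PORT" not in os.environ

    defaults = {"optim": {"lr": 1e-3, "betas": (0.9, 0.999)}}
    overrides = {"optim": {"lr": 5e-4}}
    print(merge_dicts(overrides, defaults))  # {'optim': {'lr': 0.0005, 'betas': (0.9, 0.999)}}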
| 310
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
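# Illustrative note (not part of the original file): with _LazyModule installed as
# the module object, heavy submodules are only imported on first attribute access,
# so a line like the following triggers the torch-side import lazily:
#
#   from transformers.models.vit_mae import ViTMAEModel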
| 121
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
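# Illustrative invocation (not part of the original script); task name, data path,
# and model are placeholders:
#
#   python run_multiple_choice.py \
#     --task_name swag --data_dir ./data/swag \
#     --model_name_or_path bert-base-uncased \
#     --output_dir ./swag_out --do_train --do_eval --max_seq_length 128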
| 310
| 0
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True, ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_save_directory(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 217
|
import os
def solution() -> int:
    """Returns the total of all the name scores in the file."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
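# Worked example (illustrative, not in the original): for "COLIN" the letter
# score is 3 + 15 + 12 + 9 + 14 = 53; if COLIN is the 938th name alphabetically,
# its name score is 938 * 53 = 49714, and solution() sums this over every name.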
if __name__ == "__main__":
print(solution())
| 310
| 0
|
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ):
            scheduler.set_timesteps(timesteps=timesteps)
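# Minimal usage sketch (illustrative, not part of the test file): the parallel
# DDPM scheduler's batch_step_no_noise denoises several flattened
# (timestep, sample) pairs in one call, which is what the batch test above
# exercises. Shapes here are arbitrary.
#
#   scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
#   noise_preds = torch.randn(3, 4, 8, 8)   # residuals for 3 flattened pairs
#   samples = torch.randn(3, 4, 8, 8)
#   t = torch.tensor([999, 998, 997])
#   prev = scheduler.batch_step_no_noise(noise_preds, t, samples)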
| 35
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )


def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
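# Quick usage sketch (illustrative, not part of the original module): the
# SQuAD-style normalization drives both metrics above.
#
#   >>> normalize_answer("The  Eiffel Tower!")
#   'eiffel tower'
#   >>> round(f1_score("the eiffel tower", "Eiffel Tower"), 2)
#   1.0
#   >>> calculate_exact_match(["eiffel tower"], ["The Eiffel Tower"])
#   {'em': 1.0}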
| 310
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
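# Example invocation (illustrative; the checkpoint path is a placeholder):
#
#   python convert_yolos_to_pytorch.py \
#     --yolos_name yolos_s_200_pre \
#     --checkpoint_path ./yolos_s_200_pre.pth \
#     --pytorch_dump_folder_path ./yolos-small-converted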
| 348
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_mae'''] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 310
| 0
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
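# Illustrative sketch (not part of the test file): create_student_by_copying_alternating_layers
# shrinks a teacher by copying a subset of its layers; `e` and `d` set the student's
# encoder/decoder depth. The exact shape of the extra return values (the copied layer
# ids) is assumed here from the unpacking pattern used in the tests above:
#
#   student, *copied_layer_info = create_student_by_copying_alternating_layers(
#       "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=2, d=2
#   )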
| 278
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 310
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 274
|
def merge_sort(collection: list) -> list:
    """Pure implementation of merge sort in Python.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into one sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
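# Illustrative trace (not part of the original): merge_sort([5, 1, 4, 2]) splits
# into [5, 1] and [4, 2], sorts each to [1, 5] and [2, 4], then merges by
# repeatedly popping the smaller head: 1, 2, 4, 5. Runtime is O(n log n) with
# O(n) extra space.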
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(*merge_sort(unsorted), sep=''',''')
| 310
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models can interpolate the pre-trained position embeddings so the
        # model can be used on higher-resolution images (here 480x480 with a dino-vits8 checkpoint).
        model = ViTModel.from_pretrained('facebook/dino-vits8').to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained('facebook/dino-vits8', size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained('facebook/dino-vits8', torch_dtype=torch.float16, device_map='auto')
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 168
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCamelCase (_a ):
_lowercase = 0
_lowercase = False
_lowercase = 3.0
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Any ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs(),{} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs(),{'a': 2} )
self.assertDictEqual(MockClass(a=2,b=A_ ).to_kwargs(),{'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2,c=2.2_5 ).to_kwargs(),{'a': 2, 'c': 2.2_5} )
@require_cuda
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = GradScalerKwargs(init_scale=1024,growth_factor=2 )
AcceleratorState._reset_state()
__UpperCamelCase = Accelerator(mixed_precision='fp16',kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
__UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale,1_0_2_4.0 )
self.assertEqual(scaler._growth_factor,2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor,0.5 )
self.assertEqual(scaler._growth_interval,2000 )
self.assertEqual(scaler._enabled,A_ )
@require_multi_gpu
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_,env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
__snake_case = Accelerator(kwargs_handlers=[ddp_scaler])
__snake_case = torch.nn.Linear(1_0_0, 2_0_0)
__snake_case = accelerator.prepare(model)
# Check the values changed in kwargs
__snake_case = ''''''
__snake_case = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
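# A hedged re-implementation of the to_kwargs behaviour asserted above: only fields whose
# current value differs from the declared default are reported (an illustrative helper,
# not accelerate's actual KwargsHandler code):
from dataclasses import fields

@dataclass
class TinyKwargs:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        # keep only the attributes that were explicitly changed from their defaults
        return {f.name: getattr(self, f.name) for f in fields(self) if getattr(self, f.name) != f.default}

assert TinyKwargs().to_kwargs() == {}
assert TinyKwargs(a=2, b=True).to_kwargs() == {'a': 2, 'b': True}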
| 310
| 0
|
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__snake_case = logging.get_logger(__name__)
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
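# Worked example of the 0-1000 normalisation above: a (10, 20, 60, 70) box in a 200x400
# image maps to [50, 50, 300, 175], since e.g. int(1000 * 10 / 200) == 50.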
def a ( __a , __a , __a = None ) -> Any:
'''simple docstring'''
UpperCamelCase__ :str = tesseract_config if tesseract_config is not None else ''''''
# apply OCR
UpperCamelCase__ :Tuple = to_pil_image(_lowercase )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = pil_image.size
UpperCamelCase__ :Tuple = pytesseract.image_to_data(_lowercase , lang=_lowercase , output_type='''dict''' , config=_lowercase )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[int] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
UpperCamelCase__ :Union[str, Any] = [idx for idx, word in enumerate(_lowercase ) if not word.strip()]
UpperCamelCase__ :Dict = [word for idx, word in enumerate(_lowercase ) if idx not in irrelevant_indices]
UpperCamelCase__ :int = [coord for idx, coord in enumerate(_lowercase ) if idx not in irrelevant_indices]
UpperCamelCase__ :Optional[Any] = [coord for idx, coord in enumerate(_lowercase ) if idx not in irrelevant_indices]
UpperCamelCase__ :Any = [coord for idx, coord in enumerate(_lowercase ) if idx not in irrelevant_indices]
UpperCamelCase__ :List[Any] = [coord for idx, coord in enumerate(_lowercase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
UpperCamelCase__ :Optional[Any] = []
for x, y, w, h in zip(_lowercase , _lowercase , _lowercase , _lowercase ):
UpperCamelCase__ :Tuple = [x, y, x + w, y + h]
actual_boxes.append(_lowercase )
# finally, normalize the bounding boxes
UpperCamelCase__ :Optional[int] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_lowercase , _lowercase , _lowercase ) )
assert len(_lowercase ) == len(_lowercase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowercase ( _a ):
"""simple docstring"""
_a = ['pixel_values']
def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = "" , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase__ :List[Any] = size if size is not None else {'''height''': 224, '''width''': 224}
UpperCamelCase__ :List[Any] = get_size_dict(A_ )
UpperCamelCase__ :Tuple = do_resize
UpperCamelCase__ :List[str] = size
UpperCamelCase__ :List[Any] = resample
UpperCamelCase__ :Tuple = apply_ocr
UpperCamelCase__ :Dict = ocr_lang
UpperCamelCase__ :Union[str, Any] = tesseract_config
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :int = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
UpperCamelCase__ :int = (size['''height'''], size['''width'''])
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ :int = size if size is not None else self.size
UpperCamelCase__ :Any = get_size_dict(A_ )
UpperCamelCase__ :Any = resample if resample is not None else self.resample
UpperCamelCase__ :Tuple = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCamelCase__ :Optional[int] = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCamelCase__ :str = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCamelCase__ :int = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
UpperCamelCase__ :Tuple = [to_numpy_array(A_ ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
UpperCamelCase__ :Any = []
UpperCamelCase__ :int = []
for image in images:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = apply_tesseract(A_ , A_ , A_ )
words_batch.append(A_ )
boxes_batch.append(A_ )
if do_resize:
UpperCamelCase__ :Any = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
UpperCamelCase__ :List[str] = [flip_channel_order(A_ ) for image in images]
UpperCamelCase__ :str = [to_channel_dimension_format(A_ , A_ ) for image in images]
UpperCamelCase__ :Dict = BatchFeature(data={'''pixel_values''': images} , tensor_type=A_ )
if apply_ocr:
UpperCamelCase__ :List[str] = words_batch
UpperCamelCase__ :Optional[Any] = boxes_batch
return data
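# Usage sketch for the processor class above (assumes pytesseract is installed and `img`
# is a PIL RGB image; the keys follow the BatchFeature assembled above):
#
#   encoding = processor(img, return_tensors='np')
#   encoding['pixel_values']              # resized, channel-first images, flipped to BGR
#   encoding['words'], encoding['boxes']  # OCR words and 0-1000-normalised boxes per image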
| 97
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """OwlViTImageProcessor"""
_lowercase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self: int,A_: Tuple=None,A_: int=None,**A_: int ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.',A_,)
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_,A_ )
def __call__( self: str,A_: Dict=None,A_: Optional[int]=None,A_: Any=None,A_: Tuple="max_length",A_: int="np",**A_: Optional[Any] ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(A_,A_ ) or (isinstance(A_,A_ ) and not isinstance(text[0],A_ )):
__UpperCamelCase = [self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )]
elif isinstance(A_,A_ ) and isinstance(text[0],A_ ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(A_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A_ ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(A_ ))
__UpperCamelCase = self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )
encodings.append(A_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings],dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings],dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings],axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,**A_ ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: Optional[int],*A_: int,**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process(*A_,**A_ )
def snake_case_ ( self: str,*A_: Optional[int],**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*A_,**A_ )
def snake_case_ ( self: str,*A_: Tuple,**A_: int ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*A_,**A_ )
def snake_case_ ( self: List[str],*A_: str,**A_: List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Any,**A_: Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',A_,)
return self.image_processor_class
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',A_,)
return self.image_processor
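# The query-padding step from __call__ above, in isolation: every sample is padded with
# ' ' entries up to the longest query list in the batch (a minimal standalone sketch):
texts = [['a cat'], ['a dog', 'a remote control']]
max_num_queries = max(len(t) for t in texts)
padded = [t + [' '] * (max_num_queries - len(t)) for t in texts]
assert padded == [['a cat', ' '], ['a dog', 'a remote control']]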
| 310
| 0
|
'''simple docstring'''
import math
import sys
def UpperCAmelCase_ ( __lowercase : Dict ) -> str:
'''simple docstring'''
_UpperCAmelCase = ""
try:
with open(_lowercase , "rb" ) as binary_file:
_UpperCAmelCase = binary_file.read()
for dat in data:
_UpperCAmelCase = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def UpperCAmelCase_ ( __lowercase : Union[str, Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = {"0": "0", "1": "1"}
_UpperCAmelCase , _UpperCAmelCase = "", ""
_UpperCAmelCase = len(_lowercase )
for i in range(len(_lowercase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_UpperCAmelCase = lexicon[curr_string]
result += last_match_id
_UpperCAmelCase = last_match_id + "0"
        if math.log2(_lowercase ).is_integer():
_UpperCAmelCase = {}
for curr_key in list(_lowercase ):
_UpperCAmelCase = lexicon.pop(_lowercase )
_UpperCAmelCase = new_lex
_UpperCAmelCase = last_match_id + "1"
index += 1
_UpperCAmelCase = ""
return result
def UpperCAmelCase_ ( __lowercase : int , __lowercase : Tuple ) -> None:
'''simple docstring'''
_UpperCAmelCase = 8
try:
with open(_lowercase , "wb" ) as opened_file:
_UpperCAmelCase = [
to_write[i : i + byte_length]
for i in range(0 , len(_lowercase ) , _lowercase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_lowercase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def UpperCAmelCase_ ( __lowercase : List[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
_UpperCAmelCase = data_bits[counter:]
_UpperCAmelCase = data_bits[counter + 1 :]
return data_bits
def UpperCAmelCase_ ( __lowercase : Optional[int] , __lowercase : int ) -> None:
'''simple docstring'''
_UpperCAmelCase = read_file_binary(_lowercase )
_UpperCAmelCase = remove_prefix(_lowercase )
_UpperCAmelCase = decompress_data(_lowercase )
write_file_binary(_lowercase , _lowercase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
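# The f'{dat:08b}' formatting in read_file_binary above zero-pads every byte to eight
# bits, e.g. f'{5:08b}' == '00000101' and f'{255:08b}' == '11111111'.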
| 22
|
import math
def _A ( _lowercase ) -> int:
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
__UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowercase )
if number < 1:
__UpperCamelCase = f'''Input value of [number={number}] must be > 0'''
raise ValueError(_lowercase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
__UpperCamelCase = int(math.log(number // 3 , 2 ) ) + 2
__UpperCamelCase = [3, 5]
__UpperCamelCase = 2
__UpperCamelCase = 3
for block in range(1 , _lowercase ):
for _ in range(_lowercase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
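# Sanity check: the Proth numbers (OEIS A080075) begin 3, 5, 9, 13, 17, 25, 33, ...,
# so the function above returns 9 for number=3 and 25 for number=6.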
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
__snake_case = 0
try:
__snake_case = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 310
| 0
|
def lowerCAmelCase_ ( __a , __a , __a ) -> bool:
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(_lowercase ) )
def lowerCAmelCase_ ( __a , __a , __a , __a ) -> bool:
"""simple docstring"""
if index == len(_lowercase ):
return True
# Recursive Step
for i in range(_lowercase ):
if valid_coloring(graph[index] , _lowercase , _lowercase ):
# Color current vertex
lowerCamelCase__: int =i
# Validate coloring
if util_color(_lowercase , _lowercase , _lowercase , index + 1 ):
return True
# Backtrack
lowerCamelCase__: List[Any] =-1
return False
def lowerCAmelCase_ ( __a , __a ) -> list[int]:
"""simple docstring"""
lowerCamelCase__: int =[-1] * len(_lowercase )
if util_color(_lowercase , _lowercase , _lowercase , 0 ):
return colored_vertices
return []
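# Example for the entry point above: a triangle needs three colours, so max_colors=3
# yields an assignment such as [0, 1, 2], while max_colors=2 returns []:
#
#   graph = [
#       [0, 1, 1],
#       [1, 0, 1],
#       [1, 1, 0],
#   ]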
| 10
|
import torch
from transformers import AutoModel
class __lowerCamelCase (torch.nn.Module ):
def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(A_,self ).__init__()
__UpperCamelCase = AutoModel.from_pretrained(A_,return_dict=A_ )
__UpperCamelCase = torch.nn.CosineSimilarity(3,1E-08 )
__UpperCamelCase = torch.nn.Softmax(dim=1 )
def snake_case_ ( self: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.bert(**A_ ).last_hidden_state
def snake_case_ ( self: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2,keepdim=A_ )
def snake_case_ ( self: List[str],A_: Dict,A_: Union[str, Any],A_: Union[str, Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(A_,A_ ) )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = W_supports['sizes'].tolist()
__UpperCamelCase = W_supports['start_token_id'].item()
__UpperCamelCase = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = W_supports['input_ids'] == start_token_id
__UpperCamelCase = W_supports['input_ids'] == end_token_id
for i, size in enumerate(A_ ):
if i == 0:
__UpperCamelCase = 0
else:
__UpperCamelCase = support_sizes[i - 1]
__UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
__UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]
__UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
__UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCamelCase = torch.vstack((p_starts, p_start) )
__UpperCamelCase = torch.vstack((p_ends, p_end) )
else:
__UpperCamelCase = p_start
__UpperCamelCase = p_end
return p_starts, p_ends
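# Shape sketch for the support matching above (illustrative sizes): with query embeddings
# q[i] of shape (L_q, H) and support start-token embeddings s_start of shape (N, H),
# torch.matmul(q[i], s_start.T) is (L_q, N); .sum(1) collapses the supports to (L_q,) and
# softmax(0) turns that into a per-query-token start probability.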
| 310
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase__ : List[str] = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class UpperCAmelCase ( unittest.TestCase , _a ):
'''simple docstring'''
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ = load_tool("text-question-answering" )
self.tool.setup()
snake_case_ = load_tool("text-question-answering" , remote=A_ )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = self.tool(A_ , "What did Hugging Face do in April 2021?" )
self.assertEqual(A_ , "launched the BigScience Research Workshop" )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = self.remote_tool(A_ , "What did Hugging Face do in April 2021?" )
self.assertEqual(A_ , "launched the BigScience Research Workshop" )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = self.tool(text=A_ , question="What did Hugging Face do in April 2021?" )
self.assertEqual(A_ , "launched the BigScience Research Workshop" )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = self.remote_tool(text=A_ , question="What did Hugging Face do in April 2021?" )
self.assertEqual(A_ , "launched the BigScience Research Workshop" )
| 187
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = BioGptTokenizer
_lowercase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file,'w' ) as fp:
fp.write(json.dumps(A_ ) )
with open(self.merges_file,'w' ) as fp:
fp.write('\n'.join(A_ ) )
def snake_case_ ( self: Optional[int],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'lower newer'
__UpperCamelCase = 'lower newer'
return input_text, output_text
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer(self.vocab_file,self.merges_file )
__UpperCamelCase = 'lower'
__UpperCamelCase = ['low', 'er</w>']
__UpperCamelCase = tokenizer.tokenize(A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokens + ['<unk>']
__UpperCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ),A_ )
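        # Why [14, 15, 20]: the vocab list above assigns 'low' -> 14 and 'er</w>' -> 15 by
        # position, and '<unk>' -> 20, so the merged tokens map straight onto those ids.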
@slow
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__UpperCamelCase = tokenizer.encode('sequence builders',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.encode('multi-sequence build',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_,A_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 310
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[Any] = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class UpperCAmelCase ( _a ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = '''xlnet'''
__UpperCamelCase : List[Any] = ['''mems''']
__UpperCamelCase : List[Any] = {
'''n_token''': '''vocab_size''', # Backward compatibility
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : List[Any] , lowerCAmelCase_ : Optional[int]=3_2_0_0_0 , lowerCAmelCase_ : List[Any]=1_0_2_4 , lowerCAmelCase_ : List[str]=2_4 , lowerCAmelCase_ : str=1_6 , lowerCAmelCase_ : Optional[Any]=4_0_9_6 , lowerCAmelCase_ : Union[str, Any]="gelu" , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]="bi" , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : Optional[int]=1e-12 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : int=5_1_2 , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Tuple=-1 , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : str="last" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Union[str, Any]="tanh" , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : List[Any]=5 , lowerCAmelCase_ : Union[str, Any]=5 , lowerCAmelCase_ : Union[str, Any]=5 , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : Union[str, Any]=2 , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
_A: str = vocab_size
_A: Tuple = d_model
_A: Dict = n_layer
_A: Optional[Any] = n_head
if d_model % n_head != 0:
raise ValueError(F"""\'d_model % n_head\' ({d_model % n_head}) should be equal to 0""" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"""`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})""" )
_A: Union[str, Any] = d_model // n_head
_A: List[Any] = ff_activation
_A: List[Any] = d_inner
_A: Optional[int] = untie_r
_A: Any = attn_type
_A: Optional[Any] = initializer_range
_A: Optional[int] = layer_norm_eps
_A: str = dropout
_A: str = mem_len
_A: Union[str, Any] = reuse_len
_A: Any = bi_data
_A: Any = clamp_len
_A: Any = same_length
_A: str = summary_type
_A: Union[str, Any] = summary_use_proj
_A: Optional[int] = summary_activation
_A: Optional[int] = summary_last_dropout
_A: Union[str, Any] = start_n_top
_A: Optional[Any] = end_n_top
_A: Optional[int] = bos_token_id
_A: str = pad_token_id
_A: List[Any] = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
''' instead.''' , A_ , )
_A: List[str] = kwargs['''use_cache''']
_A: str = use_mems_eval
_A: Union[str, Any] = use_mems_train
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
@property
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def __magic_name__ ( self : int , lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 121
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_a )
class __lowerCamelCase (_a ):
_lowercase = """rag"""
_lowercase = True
def __init__( self: Tuple,A_: Any=None,A_: Any=True,A_: List[Any]=None,A_: Optional[int]=None,A_: List[Any]=None,A_: str=None,A_: Union[str, Any]=None,A_: List[Any]=" / ",A_: Union[str, Any]=" // ",A_: List[Any]=5,A_: Optional[int]=300,A_: Tuple=768,A_: Tuple=8,A_: Optional[Any]="wiki_dpr",A_: int="train",A_: Union[str, Any]="compressed",A_: Optional[int]=None,A_: List[Any]=None,A_: List[str]=False,A_: List[str]=False,A_: str=0.0,A_: List[Any]=True,A_: Tuple=False,A_: int=False,A_: Dict=False,A_: Tuple=True,A_: int=None,**A_: Optional[int],):
'''simple docstring'''
super().__init__(
bos_token_id=A_,pad_token_id=A_,eos_token_id=A_,decoder_start_token_id=A_,forced_eos_token_id=A_,is_encoder_decoder=A_,prefix=A_,vocab_size=A_,**A_,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__UpperCamelCase = kwargs.pop('question_encoder' )
__UpperCamelCase = question_encoder_config.pop('model_type' )
__UpperCamelCase = kwargs.pop('generator' )
__UpperCamelCase = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = reduce_loss
__UpperCamelCase = label_smoothing
__UpperCamelCase = exclude_bos_score
__UpperCamelCase = do_marginalize
__UpperCamelCase = title_sep
__UpperCamelCase = doc_sep
__UpperCamelCase = n_docs
__UpperCamelCase = max_combined_length
__UpperCamelCase = dataset
__UpperCamelCase = dataset_split
__UpperCamelCase = index_name
__UpperCamelCase = retrieval_vector_size
__UpperCamelCase = retrieval_batch_size
__UpperCamelCase = passages_path
__UpperCamelCase = index_path
__UpperCamelCase = use_dummy_dataset
__UpperCamelCase = output_retrieved
__UpperCamelCase = do_deduplication
__UpperCamelCase = use_cache
if self.forced_eos_token_id is None:
__UpperCamelCase = getattr(self.generator,'forced_eos_token_id',A_ )
@classmethod
def snake_case_ ( cls: Any,A_: PretrainedConfig,A_: PretrainedConfig,**A_: int ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(),generator=generator_config.to_dict(),**A_ )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.question_encoder.to_dict()
__UpperCamelCase = self.generator.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
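# Composition sketch for the classmethod above (the checkpoint names are illustrative
# assumptions, not mandated by this config):
#
#   from transformers import AutoConfig, RagConfig
#   q_cfg = AutoConfig.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
#   g_cfg = AutoConfig.from_pretrained('facebook/bart-large')
#   rag_cfg = RagConfig.from_question_encoder_generator_configs(q_cfg, g_cfg, n_docs=5)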
| 310
| 0
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class snake_case :
SCREAMING_SNAKE_CASE_ : Any = 42
# setable values
SCREAMING_SNAKE_CASE_ : str = 42
SCREAMING_SNAKE_CASE_ : int = 42
SCREAMING_SNAKE_CASE_ : str = None
@classmethod
def lowercase_ ( cls : Optional[int] , UpperCamelCase__ : CommonSchedulerState , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : jnp.ndarray)-> Dict:
'''simple docstring'''
return cls(common=A_ , init_noise_sigma=A_ , timesteps=A_)
@dataclass
class snake_case ( _a ):
SCREAMING_SNAKE_CASE_ : int = 42
class snake_case ( _a, _a ):
SCREAMING_SNAKE_CASE_ : Optional[int] = [e.name for e in FlaxKarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 42
@property
def lowercase_ ( self : List[Any])-> Optional[Any]:
'''simple docstring'''
return True
@register_to_config
    def __init__( self : Tuple , UpperCamelCase__ : int = 1_0_0_0 , UpperCamelCase__ : float = 0.0001 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : str = "linear" , UpperCamelCase__ : Optional[jnp.ndarray] = None , UpperCamelCase__ : str = "fixed_small" , UpperCamelCase__ : bool = True , UpperCamelCase__ : str = "epsilon" , UpperCamelCase__ : jnp.dtype = jnp.float32 , )-> Any:
'''simple docstring'''
__lowerCAmelCase: List[str] = dtype
def lowercase_ ( self : str , UpperCamelCase__ : Optional[CommonSchedulerState] = None)-> Dict:
'''simple docstring'''
if common is None:
__lowerCAmelCase: str = CommonSchedulerState.create(self)
# standard deviation of the initial noise distribution
__lowerCAmelCase: List[str] = jnp.array(1.0 , dtype=self.dtype)
__lowerCAmelCase: Optional[int] = jnp.arange(0 , self.config.num_train_timesteps).round()[::-1]
return DDPMSchedulerState.create(
common=A_ , init_noise_sigma=A_ , timesteps=A_ , )
def lowercase_ ( self : Dict , UpperCamelCase__ : DDPMSchedulerState , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : Optional[int] = None)-> Dict:
'''simple docstring'''
return sample
def lowercase_ ( self : int , UpperCamelCase__ : DDPMSchedulerState , UpperCamelCase__ : int , UpperCamelCase__ : Tuple = ())-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__lowerCAmelCase: Any = (jnp.arange(0 , A_) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=A_ , timesteps=A_ , )
def lowercase_ ( self : str , UpperCamelCase__ : DDPMSchedulerState , UpperCamelCase__ : List[str] , UpperCamelCase__ : str=None , UpperCamelCase__ : Union[str, Any]=None)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: List[Any] = state.common.alphas_cumprod[t]
__lowerCAmelCase: int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__lowerCAmelCase: Tuple = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__lowerCAmelCase: Optional[int] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__lowerCAmelCase: Optional[Any] = jnp.clip(A_ , a_min=1e-20)
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__lowerCAmelCase: Optional[int] = jnp.log(jnp.clip(A_ , a_min=1e-20))
elif variance_type == "fixed_large":
__lowerCAmelCase: Dict = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__lowerCAmelCase: Optional[Any] = jnp.log(state.common.betas[t])
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__lowerCAmelCase: Any = variance
__lowerCAmelCase: Dict = state.common.betas[t]
__lowerCAmelCase: Any = (predicted_variance + 1) / 2
__lowerCAmelCase: Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : DDPMSchedulerState , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : Optional[jax.random.KeyArray] = None , UpperCamelCase__ : bool = True , )-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Tuple = timestep
if key is None:
__lowerCAmelCase: str = jax.random.PRNGKey(0)
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__lowerCAmelCase , __lowerCAmelCase: List[str] = jnp.split(A_ , sample.shape[1] , axis=1)
else:
__lowerCAmelCase: List[Any] = None
# 1. compute alphas, betas
__lowerCAmelCase: int = state.common.alphas_cumprod[t]
__lowerCAmelCase: Union[str, Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
__lowerCAmelCase: Tuple = 1 - alpha_prod_t
__lowerCAmelCase: List[Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__lowerCAmelCase: Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__lowerCAmelCase: Optional[Any] = model_output
elif self.config.prediction_type == "v_prediction":
__lowerCAmelCase: List[str] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler.")
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__lowerCAmelCase: List[Any] = jnp.clip(A_ , -1 , 1)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase: Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__lowerCAmelCase: Optional[int] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase: Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__lowerCAmelCase: Optional[int] = jax.random.split(A_ , num=1)
__lowerCAmelCase: List[str] = jax.random.normal(A_ , shape=model_output.shape , dtype=self.dtype)
return (self._get_variance(A_ , A_ , predicted_variance=A_) ** 0.5) * noise
__lowerCAmelCase: Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype))
__lowerCAmelCase: List[str] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=A_ , state=A_)
def lowercase_ ( self : int , UpperCamelCase__ : DDPMSchedulerState , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : jnp.ndarray , )-> List[Any]:
'''simple docstring'''
return add_noise_common(state.common , A_ , A_ , A_)
def lowercase_ ( self : Tuple , UpperCamelCase__ : DDPMSchedulerState , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : jnp.ndarray , UpperCamelCase__ : jnp.ndarray , )-> Any:
'''simple docstring'''
return get_velocity_common(state.common , A_ , A_ , A_)
def __len__( self : List[str])-> Union[str, Any]:
'''simple docstring'''
return self.config.num_train_timesteps
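# Timestep spacing sketch for set_timesteps above: with num_train_timesteps=1000 and
# num_inference_steps=4 the step ratio is 250, so the reversed schedule visits
# [750, 500, 250, 0], i.e. (jnp.arange(0, 4) * 250).round()[::-1].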
| 217
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCamelCase (_a ):
_lowercase = """M-CLIP"""
def __init__( self: int,A_: Any=1024,A_: Union[str, Any]=768,**A_: str ):
'''simple docstring'''
__UpperCamelCase = transformerDimSize
__UpperCamelCase = imageDimSize
super().__init__(**A_ )
class __lowerCamelCase (_a ):
_lowercase = MCLIPConfig
def __init__( self: int,A_: Optional[Any],*A_: List[str],**A_: Union[str, Any] ):
'''simple docstring'''
super().__init__(A_,*A_,**A_ )
__UpperCamelCase = XLMRobertaModel(A_ )
__UpperCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions,out_features=config.numDims )
def snake_case_ ( self: Dict,A_: int,A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.transformer(input_ids=A_,attention_mask=A_ )[0]
__UpperCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(A_ ), embs
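# The masked mean pooling above, in isolation: padding positions contribute nothing and
# each sequence is averaged over its true length, e.g. with embs of shape (B, L, H) and
# attention_mask of shape (B, L):
#
#   pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]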
| 310
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = torch.device("cpu")
def __snake_case( ) -> Optional[int]:
snake_case__ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : int = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
def __snake_case( _lowerCAmelCase ) -> Any:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
snake_case__ : Dict = dct.pop(_lowercase )
snake_case__ : Dict = val
def __snake_case( _lowerCAmelCase ) -> Any:
snake_case__ : Tuple = []
for k in state_dict.keys():
snake_case__ : Optional[int] = k
if ".pwconv" in k:
snake_case__ : List[str] = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
snake_case__ : Dict = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
snake_case__ : Union[str, Any] = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
snake_case__ : Optional[int] = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
snake_case__ : Any = k_new.split(""".""" )
if ls[2].isdigit():
snake_case__ : Dict = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
snake_case__ : List[Any] = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
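# Example mapping produced by the rules above: 'network.0.0.dwconv.weight' becomes
# 'swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight'; the rename helper then
# pops the old entry and re-inserts its value under the new name.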
@torch.no_grad()
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
snake_case__ : Any = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
snake_case__ : Union[str, Any] = 1_000
snake_case__ : Union[str, Any] = """huggingface/label-files"""
snake_case__ : List[Any] = """imagenet-1k-id2label.json"""
snake_case__ : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : Tuple = {int(_lowercase ): v for k, v in idalabel.items()}
snake_case__ : str = idalabel
snake_case__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
snake_case__ : List[Any] = [3, 3, 6, 4]
snake_case__ : List[Any] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
snake_case__ : List[Any] = [3, 3, 9, 6]
snake_case__ : Dict = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
snake_case__ : str = [4, 3, 10, 5]
snake_case__ : List[str] = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
snake_case__ : Optional[int] = [4, 4, 12, 6]
snake_case__ : Union[str, Any] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
snake_case__ : int = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , check_hash=_lowercase )
else:
snake_case__ : int = torch.load(_lowercase , map_location="""cpu""" )
snake_case__ : Dict = checkpoint
snake_case__ : List[Any] = create_rename_keys(_lowercase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
# load HuggingFace model
snake_case__ : Any = SwiftFormerForImageClassification(_lowercase ).eval()
hf_model.load_state_dict(_lowercase )
# prepare test inputs
snake_case__ : Dict = prepare_img()
snake_case__ : List[str] = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
snake_case__ : Tuple = processor(images=_lowercase , return_tensors="""pt""" )
# compare outputs from both models
snake_case__ : Any = get_expected_output(_lowercase )
snake_case__ : List[Any] = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , _lowercase , atol=1e-3 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
hf_model.save_pretrained(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
__a = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 35
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __lowerCamelCase :
_lowercase = XGLMConfig
_lowercase = {}
_lowercase = """gelu"""
def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = ffn_dim
__UpperCamelCase = activation_function
__UpperCamelCase = activation_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = 2
__UpperCamelCase = 1
def snake_case_ ( self: Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = self.get_config()
__UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,)
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
        ((__UpperCamelCase), (__UpperCamelCase), (__UpperCamelCase), (__UpperCamelCase)) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: Optional[Any],A_: int=True ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        __UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(),A_ )
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' )
__UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] )
__UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ )
__UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_,A_ )
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = 'left'
# use different length sentences to test batching
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ )
__UpperCamelCase = inputs['input_ids']
__UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_,A_ )
self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
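# Why left padding: decoder-only models continue generating from the last position, so
# pads must sit on the left in batched generation. A hedged sketch of the setup above:
#
#   tokenizer.padding_side = 'left'
#   batch = tokenizer(sentences, return_tensors='tf', padding=True)
#   out = model.generate(input_ids=batch['input_ids'], attention_mask=batch['attention_mask'], max_new_tokens=12)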
| 310
| 0
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __snake_case ( unittest.TestCase ):
__lowerCamelCase : List[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
__lowerCamelCase : str = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Tuple =pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
UpperCAmelCase : List[str] =text_generator('''This is a test''' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
UpperCAmelCase : Optional[int] =text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
A_ , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
UpperCAmelCase : int =text_generator('''This is a test''' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ )
self.assertEqual(
A_ , [
{'''generated_token_ids''': ANY(A_ )},
{'''generated_token_ids''': ANY(A_ )},
] , )
UpperCAmelCase : Optional[int] =text_generator.model.config.eos_token_id
UpperCAmelCase : List[Any] ='''<pad>'''
UpperCAmelCase : List[str] =text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , )
self.assertEqual(
A_ , [
[
{'''generated_token_ids''': ANY(A_ )},
{'''generated_token_ids''': ANY(A_ )},
],
[
{'''generated_token_ids''': ANY(A_ )},
{'''generated_token_ids''': ANY(A_ )},
],
] , )
@require_tf
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : str =pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
UpperCAmelCase : Optional[Any] =text_generator('''This is a test''' , do_sample=A_ )
self.assertEqual(
A_ , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
UpperCAmelCase : Optional[Any] =text_generator(['''This is a test''', '''This is a second test'''] , do_sample=A_ )
self.assertEqual(
A_ , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[str] =TextGenerationPipeline(model=A_ , tokenizer=A_ )
return text_generator, ["This is a test", "Another test"]
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] ='''Hello I believe in'''
UpperCAmelCase : int =pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase : Union[str, Any] =text_generator(A_ )
self.assertEqual(
A_ , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
UpperCAmelCase : Optional[int] =text_generator(A_ , stop_sequence=''' fe''' )
self.assertEqual(A_ , [{'''generated_text''': '''Hello I believe in fe'''}] )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> int:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =text_generator.model
UpperCAmelCase : Union[str, Any] =text_generator.tokenizer
UpperCAmelCase : List[str] =text_generator('''This is a test''' )
self.assertEqual(A_ , [{'''generated_text''': ANY(A_ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
UpperCAmelCase : Optional[Any] =text_generator('''This is a test''' , return_full_text=A_ )
self.assertEqual(A_ , [{'''generated_text''': ANY(A_ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
UpperCAmelCase : List[str] =pipeline(task='''text-generation''' , model=A_ , tokenizer=A_ , return_full_text=A_ )
UpperCAmelCase : Tuple =text_generator('''This is a test''' )
self.assertEqual(A_ , [{'''generated_text''': ANY(A_ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
UpperCAmelCase : Dict =text_generator('''This is a test''' , return_full_text=A_ )
self.assertEqual(A_ , [{'''generated_text''': ANY(A_ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
UpperCAmelCase : Dict =text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'''generated_text''': ANY(A_ )}, {'''generated_text''': ANY(A_ )}],
[{'''generated_text''': ANY(A_ )}, {'''generated_text''': ANY(A_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
UpperCAmelCase : List[Any] =text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'''generated_text''': ANY(A_ )}, {'''generated_text''': ANY(A_ )}],
[{'''generated_text''': ANY(A_ )}, {'''generated_text''': ANY(A_ )}],
] , )
with self.assertRaises(A_ ):
UpperCAmelCase : Optional[int] =text_generator('''test''' , return_full_text=A_ , return_text=A_ )
with self.assertRaises(A_ ):
UpperCAmelCase : int =text_generator('''test''' , return_full_text=A_ , return_tensors=A_ )
with self.assertRaises(A_ ):
UpperCAmelCase : Optional[Any] =text_generator('''test''' , return_text=A_ , return_tensors=A_ )
        # An empty prompt is slightly special: it requires a BOS token to exist.
        # Special case for Pegasus, which always appends an EOS token, so it
        # works even without a BOS token.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCAmelCase : Optional[int] =text_generator('''''' )
self.assertEqual(A_ , [{'''generated_text''': ANY(A_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
UpperCAmelCase : Optional[int] =text_generator('''''' )
        if text_generator.framework == "tf":
            # TF generation does not support `max_new_tokens`, and controlling long
            # generation with only `max_length` would require fancy bookkeeping,
            # so these tests are skipped for now.
            return
        # We don't care about infinite-range models; they already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings
        # which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
UpperCAmelCase : Union[str, Any] =text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A_ ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
import torch
# Classic `model_kwargs`
UpperCAmelCase : Optional[int] =pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCAmelCase : Tuple =pipe('''This is a test''' )
self.assertEqual(
A_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
        # Upgraded those two to real pipeline arguments (they are simply forwarded to the model, as they're unlikely to mean anything else.)
UpperCAmelCase : Any =pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCAmelCase : str =pipe('''This is a test''' )
self.assertEqual(
A_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
UpperCAmelCase : Dict =pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
UpperCAmelCase : Optional[int] =pipe('''This is a test''' )
self.assertEqual(
A_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
import torch
UpperCAmelCase : Any =pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
import torch
UpperCAmelCase : str =pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=A_ , top_p=0.5 )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int ='''Hello world'''
UpperCAmelCase : Any =pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
UpperCAmelCase : List[Any] =logging.get_logger('''transformers.generation.tf_utils''' )
else:
UpperCAmelCase : Optional[Any] =logging.get_logger('''transformers.generation.utils''' )
        UpperCAmelCase : List[Any] ='''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A_ ) as cl:
UpperCAmelCase : Dict =text_generator(A_ , max_length=10 , max_new_tokens=1 )
self.assertIn(A_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A_ ) as cl:
UpperCAmelCase : Dict =text_generator(A_ , max_new_tokens=1 )
self.assertNotIn(A_ , cl.out )
with CaptureLogger(A_ ) as cl:
UpperCAmelCase : Optional[int] =text_generator(A_ , max_length=10 )
self.assertNotIn(A_ , cl.out )
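if __name__ == "__main__":
    # A hedged sketch of the behaviour the warning test above checks: passing both
    # `max_length` and `max_new_tokens` logs a "Both `max_new_tokens`..." warning,
    # while passing only one of them does not. The checkpoint is the same tiny model.
    demo_generator = pipeline('text-generation', model='hf-internal-testing/tiny-random-gpt2')
    demo_generator('Hello world', max_length=10, max_new_tokens=1)  # logs the warning
    demo_generator('Hello world', max_new_tokens=1)                 # no warning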
| 348
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
    bleu_data = json.load(f)
@require_torch
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int,A_: int ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(A_ )
def snake_case_ ( self: Dict,A_: int ):
'''simple docstring'''
__UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = F'''facebook/wmt19-{pair}'''
__UpperCamelCase = self.get_tokenizer(A_ )
__UpperCamelCase = self.get_model(A_ )
__UpperCamelCase = bleu_data[pair]['src']
__UpperCamelCase = bleu_data[pair]['tgt']
__UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ )
__UpperCamelCase = model.generate(
input_ids=batch.input_ids,num_beams=8,)
__UpperCamelCase = tokenizer.batch_decode(
A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ )
__UpperCamelCase = calculate_bleu(A_,A_ )
print(A_ )
self.assertGreaterEqual(scores['bleu'],A_ )
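# The test imports `calculate_bleu` from a local `utils` module that is not shown here.
# A plausible minimal equivalent, assuming sacrebleu is installed (this is a sketch,
# not necessarily the repository's actual helper, hence the underscore name):
def _calculate_bleu_sketch(output_lns, refs_lns):
    import sacrebleu

    return {'bleu': round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}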
| 310
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class A ( _a ):
__snake_case = 'data2vec-vision'
def __init__( self, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=12, UpperCamelCase__=3072, UpperCamelCase__="gelu", UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.02, UpperCamelCase__=1E-12, UpperCamelCase__=224, UpperCamelCase__=16, UpperCamelCase__=3, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=True, UpperCamelCase__=[3, 5, 7, 11], UpperCamelCase__=[1, 2, 3, 6], UpperCamelCase__=True, UpperCamelCase__=0.4, UpperCamelCase__=256, UpperCamelCase__=1, UpperCamelCase__=False, UpperCamelCase__=255, **UpperCamelCase__, ):
"""simple docstring"""
super().__init__(**A_ )
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = use_mask_token
lowerCAmelCase_ = use_absolute_position_embeddings
lowerCAmelCase_ = use_relative_position_bias
lowerCAmelCase_ = use_shared_relative_position_bias
lowerCAmelCase_ = layer_scale_init_value
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowerCAmelCase_ = out_indices
lowerCAmelCase_ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase_ = use_auxiliary_head
lowerCAmelCase_ = auxiliary_loss_weight
lowerCAmelCase_ = auxiliary_channels
lowerCAmelCase_ = auxiliary_num_convs
lowerCAmelCase_ = auxiliary_concat_input
lowerCAmelCase_ = semantic_loss_ignore_index
class A ( _a ):
__snake_case = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return 1E-4
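if __name__ == "__main__":
    # A hedged sketch of how the `inputs` property above is consumed during ONNX
    # export: it supplies the dynamic axes for the traced graph, and the 1e-4
    # tolerance bounds the PyTorch-vs-ONNX output comparison. The export call is
    # commented out because it assumes an already-loaded `model`.
    import torch

    dynamic_axes = {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
    dummy_pixel_values = torch.randn(1, 3, 224, 224)
    # torch.onnx.export(model, dummy_pixel_values, 'data2vec_vision.onnx',
    #                   input_names=['pixel_values'], dynamic_axes=dynamic_axes)
    print(dummy_pixel_values.shape)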
| 278
|
def hexagonal_numbers(length: int) -> list[int]:
    """simple docstring"""
    if not isinstance(length, int) or length <= 0:
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
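    # Quick hedged check of the closed form h_n = n * (2n - 1) implemented above
    # (note the list starts at n = 0):
    assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]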
| 310
| 0
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """simple docstring"""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("""://""" )[1]
    return dataset_path
def is_remote_filesystem(fs) -> bool:
    """simple docstring"""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs, src, dst):
    """simple docstring"""
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm; it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock() -> None:
    """simple docstring"""
    if hasattr(fsspec.asyn , """reset_lock""" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
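if __name__ == "__main__":
    # A short hedged usage sketch for the helpers above: a local fsspec filesystem
    # is not "remote", and stripping the protocol leaves only the path portion.
    local_fs = fsspec.filesystem('file')
    assert is_remote_filesystem(local_fs) is False
    assert extract_path_from_uri('s3://my-bucket/datasets/squad') == 'my-bucket/datasets/squad'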
| 274
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = MgpstrTokenizer
_lowercase = False
_lowercase = {}
_lowercase = False
def snake_case_ ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def snake_case_ ( self: Dict,**A_: Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'tester'
__UpperCamelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ )
self.assertEqual(len(A_ ),1 )
__UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ )
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ),0 )
__UpperCamelCase = tokenizer.decode(A_ )
self.assertIsInstance(A_,A_ )
self.assertEqual(text_a.replace(' ','' ),A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
| 310
| 0
|
'''simple docstring'''
def _A (word: str ) -> str:
    '''simple docstring'''
    return "".join(chr(ord(char ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
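    # Quick hedged checks for the ASCII upper-casing helper above: subtracting 32
    # from a lowercase code point yields the uppercase letter; other chars pass through.
    assert ord('a') - 32 == ord('A')
    assert _A('hello World_1') == 'HELLO WORLD_1'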
| 168
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Union[str, Any],A_: List[str] ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = RobertaEmbeddings(A_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Any,A_: int ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = config.num_labels
__UpperCamelCase = config.num_hidden_layers
__UpperCamelCase = DeeRobertaModel(A_ )
__UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase = nn.Linear(config.hidden_size,self.config.num_labels )
@add_start_docstrings_to_model_forward(A_ )
def snake_case_ ( self: List[str],A_: int=None,A_: List[Any]=None,A_: List[str]=None,A_: List[str]=None,A_: Optional[int]=None,A_: List[str]=None,A_: Any=None,A_: List[Any]=-1,A_: List[Any]=False,):
'''simple docstring'''
__UpperCamelCase = self.num_layers
try:
__UpperCamelCase = self.roberta(
A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,)
__UpperCamelCase = outputs[1]
__UpperCamelCase = self.dropout(A_ )
__UpperCamelCase = self.classifier(A_ )
__UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase = e.message
__UpperCamelCase = e.exit_layer
__UpperCamelCase = outputs[0]
if not self.training:
__UpperCamelCase = entropy(A_ )
__UpperCamelCase = []
__UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
# work with highway exits
__UpperCamelCase = []
for highway_exit in outputs[-1]:
__UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
__UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase = (loss,) + outputs
if not self.training:
__UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
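# `entropy` (imported above from modeling_highway_bert) drives the early-exit decision:
# an off-ramp whose logits have low entropy is confident enough to exit early.
# A plausible minimal form of that function, stated as an assumption and kept under
# a different name so it does not shadow the real import:
def _entropy_sketch(logits):
    import torch

    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)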
| 310
| 0
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module ) -> bool:
    '''simple docstring'''
    if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(torch , '''_dynamo''' ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel(model , keep_fpaa_wrapper = True ):
    '''simple docstring'''
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model , '''forward''' )
        original_forward = model.__dict__.pop('''_original_forward''' , None )
        if original_forward is not None:
            while hasattr(forward , '''__wrapped__''' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , '''_converted_to_transformer_engine''' , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone() -> None:
    '''simple docstring'''
    PartialState().wait_for_everyone()
def save(obj , f ) -> None:
    '''simple docstring'''
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment(**kwargs ):
    '''simple docstring'''
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj ) -> str:
    '''simple docstring'''
    if not hasattr(obj , '''__qualname__''' ) and not hasattr(obj , '''__name__''' ):
        obj = getattr(obj , '''__class__''' , obj )
    if hasattr(obj , '''__qualname__''' ):
        return obj.__qualname__
    if hasattr(obj , '''__name__''' ):
        return obj.__name__
    return str(obj )
def merge_dicts(source , destination ):
    '''simple docstring'''
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use(port = None ) -> bool:
    '''simple docstring'''
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('''localhost''', port) ) == 0
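if __name__ == "__main__":
    # A hedged usage sketch for `patch_environment` above: keys are upper-cased,
    # exported for the duration of the block, then removed again.
    with patch_environment(master_addr='127.0.0.1', master_port='29501'):
        assert os.environ['MASTER_ADDR'] == '127.0.0.1'
    assert 'MASTER_ADDR' not in os.environ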
| 97
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def snake_case_ ( *A_: Optional[Any],**A_: Tuple ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase (unittest.TestCase ):
_lowercase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
import datasets
__UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )
__UpperCamelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__UpperCamelCase = object_detector(A_,threshold=0.0 )
self.assertEqual(len(A_ ),len(A_ ) )
for outputs in batch_outputs:
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
],threshold=0.0,)
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
@require_torch
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 0.9_9_8_5
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd'
__UpperCamelCase = 0.9_9_9_3
__UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ )
__UpperCamelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
| 310
| 0
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path : str , metadata_path : str , entity_vocab_path : str , pytorch_dump_folder_path : str , model_size : str ) -> Dict:
'''simple docstring'''
with open(_lowercase ) as metadata_file:
_UpperCAmelCase = json.load(_lowercase )
_UpperCAmelCase = LukeConfig(use_entity_aware_attention=_lowercase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase = torch.load(_lowercase , map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCAmelCase = load_original_entity_vocab(_lowercase )
# add an entry for [MASK2]
_UpperCAmelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase = AddedToken("<ent>" , lstrip=_lowercase , rstrip=_lowercase )
_UpperCAmelCase = AddedToken("<ent2>" , lstrip=_lowercase , rstrip=_lowercase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(_lowercase )
with open(os.path.join(_lowercase , "tokenizer_config.json" ) , "r" ) as f:
_UpperCAmelCase = json.load(_lowercase )
_UpperCAmelCase = "MLukeTokenizer"
with open(os.path.join(_lowercase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(_lowercase , _lowercase )
with open(os.path.join(_lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(_lowercase , _lowercase )
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_lowercase )
# Initialize the embeddings of the special tokens
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCAmelCase = state_dict["embeddings.word_embeddings.weight"]
_UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCAmelCase = state_dict[bias_name]
_UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase = f'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCAmelCase = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCAmelCase = state_dict["entity_predictions.bias"]
_UpperCAmelCase = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCAmelCase = LukeForMaskedLM(config=_lowercase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCAmelCase = state_dict[key]
else:
_UpperCAmelCase = state_dict[key]
_UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_lowercase , strict=_lowercase )
if set(_lowercase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(_lowercase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_lowercase , task="entity_classification" )
_UpperCAmelCase = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCAmelCase = (0, 9)
_UpperCAmelCase = tokenizer(_lowercase , entity_spans=[span] , return_tensors="pt" )
_UpperCAmelCase = model(**_lowercase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 33, 768) )
_UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowercase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 1, 768) )
_UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _lowercase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_lowercase )
_UpperCAmelCase = "Tokyo is the capital of <mask>."
_UpperCAmelCase = (24, 30)
_UpperCAmelCase = tokenizer(_lowercase , entity_spans=[span] , return_tensors="pt" )
_UpperCAmelCase = model(**_lowercase )
_UpperCAmelCase = encoding["input_ids"][0].tolist()
_UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_lowercase )
_UpperCAmelCase = outputs.entity_logits[0][0].argmax().item()
_UpperCAmelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(_lowercase ) )
model.save_pretrained(_lowercase )
def load_original_entity_vocab( entity_vocab_path : str ) -> Any:
    '''simple docstring'''
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f'{language}:{entity_name}'] = entity_id
    return new_mapping
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__SCREAMING_SNAKE_CASE :Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
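    # The conversion above grows each embedding matrix by one row per new special
    # token via `unsqueeze(0)` + `torch.cat`. A standalone hedged illustration:
    word_emb = torch.randn(10, 4)       # (vocab_size, hidden_size)
    new_row = word_emb[3].unsqueeze(0)  # initialise from an existing row, shape (1, 4)
    assert torch.cat([word_emb, new_row]).shape == (11, 4)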
| 22
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __lowerCamelCase (_a ):
_lowercase = """xlm-roberta"""
def __init__( self: Union[str, Any],A_: Union[str, Any]=3_0522,A_: Dict=768,A_: Union[str, Any]=12,A_: Any=12,A_: str=3072,A_: Union[str, Any]="gelu",A_: str=0.1,A_: Optional[int]=0.1,A_: List[Any]=512,A_: Optional[Any]=2,A_: Dict=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=1,A_: str=0,A_: str=2,A_: Optional[Any]="absolute",A_: Union[str, Any]=True,A_: int=None,**A_: Optional[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 310
| 0
|
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude: float , angle: float , radian_mode: bool = False ) -> list[float]:
    """simple docstring"""
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium( forces: NDArray[floataa] , location: NDArray[floataa] , eps: float = 10**-1 ) -> bool:
    """simple docstring"""
    moments: NDArray[floataa] = cross(location , forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
__A = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
__A = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__A = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
__A = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__A = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]])
__A = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 10
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase (_a ):
_lowercase = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(A_,A_ ):
__UpperCamelCase = v.to_dict()
return d
| 310
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase ( ABC ):
    '''simple docstring'''
    @staticmethod
    @abstractmethod
    def register_subcommand( parser : ArgumentParser ):
        """simple docstring"""
        raise NotImplementedError()
    @abstractmethod
    def run( self ):
        """simple docstring"""
        raise NotImplementedError()
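# A hedged sketch of a concrete command implementing the abstract interface above
# (the command name and behaviour are illustrative, not taken from transformers):
class EchoCommand(UpperCAmelCase):
    @staticmethod
    def register_subcommand(parser):
        # in transformers-cli, `parser` is the sub-parsers action of the root parser
        echo_parser = parser.add_parser('echo')
        echo_parser.add_argument('text')
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)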
| 187
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module ) -> bool:
    """simple docstring"""
    if is_torch_version('<' , '2.0.0' ) or not hasattr(torch , '_dynamo' ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel(model , keep_fpaa_wrapper = True ):
    """simple docstring"""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model , 'forward' )
        original_forward = model.__dict__.pop('_original_forward' , None )
        if original_forward is not None:
            while hasattr(forward , '__wrapped__' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , '_converted_to_transformer_engine' , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone() -> None:
    """simple docstring"""
    PartialState().wait_for_everyone()
def save(obj , f ) -> None:
    """simple docstring"""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment(**kwargs ):
    """simple docstring"""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj ) -> str:
    """simple docstring"""
    if not hasattr(obj , '__qualname__' ) and not hasattr(obj , '__name__' ):
        obj = getattr(obj , '__class__' , obj )
    if hasattr(obj , '__qualname__' ):
        return obj.__qualname__
    if hasattr(obj , '__name__' ):
        return obj.__name__
    return str(obj )
def merge_dicts(source , destination ):
    """simple docstring"""
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use(port = None ) -> bool:
    """simple docstring"""
    if port is None:
        port = 2_95_00
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('localhost', port) ) == 0
| 310
| 0
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase ( _a ):
'''simple docstring'''
def __init__( self : Optional[Any] , *lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : Dict ):
"""simple docstring"""
super().__init__(*A_ , **A_ )
_A: Dict = eval_examples
_A: Optional[int] = post_process_function
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : str = "eval" ):
"""simple docstring"""
_A: Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
_A: List[str] = self.get_eval_dataloader(A_ )
_A: Optional[int] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_A: List[Any] = self.compute_metrics
_A: Tuple = None
_A: Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A: str = time.time()
try:
_A: List[Any] = eval_loop(
A_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , metric_key_prefix=A_ , )
finally:
_A: Tuple = compute_metrics
_A: Optional[int] = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
A_ , A_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_A: Optional[int] = self.post_process_function(A_ , A_ , output.predictions )
_A: Optional[Any] = self.compute_metrics(A_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
_A: Optional[int] = metrics.pop(A_ )
metrics.update(output.metrics )
else:
_A: Any = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(A_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A: List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , A_ )
return metrics
def __magic_name__ ( self : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str=None , lowerCAmelCase_ : str = "test" ):
"""simple docstring"""
_A: Any = self.get_test_dataloader(A_ )
# Temporarily disable metric computation, we will do it in the loop here.
_A: Dict = self.compute_metrics
_A: Tuple = None
_A: Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A: int = time.time()
try:
_A: List[Any] = eval_loop(
A_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , metric_key_prefix=A_ , )
finally:
_A: str = compute_metrics
_A: Optional[Any] = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
A_ , A_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_A: Any = self.post_process_function(A_ , A_ , output.predictions , '''predict''' )
_A: str = self.compute_metrics(A_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
_A: Tuple = metrics.pop(A_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A_ )
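# The trainer above assumes `post_process_function(examples, features, predictions[, stage])`
# returns an `EvalPrediction`-like object exposing `.predictions` and `.label_ids`.
# A hedged stub showing the expected shape of that hook (illustrative, not from the repo):
def _post_process_function_sketch(examples, features, predictions, stage='eval'):
    from transformers.trainer_utils import EvalPrediction

    # hypothetical: map raw model outputs back to answer texts and pair them with references
    return EvalPrediction(predictions=predictions, label_ids=None)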
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
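# Example invocation of the script above (the task name, data path, and output path
# are illustrative placeholders, not part of the original file):
#
#   python run_multiple_choice.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name swag \
#       --data_dir ./data/swag \
#       --output_dir ./out \
#       --do_train --do_eval \
#       --max_seq_length 128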
"""simple docstring"""
import os
from pathlib import Path
def a__ ( ) -> List[Any]:
from torch.utils.cpp_extension import load
__lowerCAmelCase: Tuple = Path(_lowercase ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
__lowerCAmelCase: Optional[int] = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , _lowercase , with_cuda=_lowercase , extra_include_paths=[str(_lowercase )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
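# Usage sketch: the JIT build above needs a local CUDA toolchain and can take a while
# on the first call; torch.utils.cpp_extension caches the compiled extension, so later
# calls are fast. A guarded call pattern (illustrative):
#
#   try:
#       MSDA = load_cuda_kernels()
#   except Exception:
#       MSDA = None  # e.g. no compiler/CUDA available; the caller must handle the fallback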
import os


def solution():
    """Project Euler 22: sum over all names of (1-indexed position in the sorted list) * (alphabetical-value score)."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64  # "A" is 65, so A=1, B=2, ...
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
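# A quick self-contained check of the scoring rule used above (from the Project Euler
# 22 statement): "COLIN" has alphabetical value 3 + 15 + 12 + 9 + 14 = 53, and at
# position 938 in the sorted list it contributes 938 * 53 = 49714.
_colin_score = sum(ord(letter) - 64 for letter in "COLIN")
assert _colin_score == 53
assert 938 * _colin_score == 49714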
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Real (active) power in watts: P = S * cos(phi), where cos(phi) is the power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power in VAR: Q = S * sin(phi) = S * sqrt(1 - power_factor**2)."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
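# Worked example: with apparent power S = 100 VA and power factor 0.8, real power is
# 100 * 0.8 = 80 W and reactive power is 100 * sqrt(1 - 0.64) = 60 VAR (the classic
# 3-4-5 power triangle).
assert real_power(100, 0.8) == 80.0
assert round(reactive_power(100, 0.8), 10) == 60.0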
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
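# A small self-contained check of the QA metrics above: exact match and token-level
# F1 are computed after normalization (lowercasing, stripping punctuation/articles).
assert exact_match_score("The Answer!", "answer")
assert calculate_exact_match(["paris", "london"], ["Paris.", "berlin"]) == {"em": 0.5}
# prediction shares 2 of its 3 tokens with the 2-token reference:
# precision = 2/3, recall = 1, F1 = 0.8
print(round(f1_score("new york city", "york city"), 2))  # 0.8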
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
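# Example invocation of the conversion script above (paths are illustrative
# placeholders, not from the original file):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin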
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
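# What the `_LazyModule` indirection above buys: importing the package is cheap,
# because the heavy torch/TF submodules listed in `_import_structure` are only
# imported when one of their names is first accessed, e.g. (illustrative):
#
#   from transformers import ViTMAEModel  # resolves the torch branch lazily
#   model = ViTMAEModel.from_pretrained("facebook/vit-mae-base")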
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure."""
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
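# Example invocation of the Bort conversion script above (the script filename and
# paths are illustrative placeholders):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort/bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch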
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" T5 tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by appending the eos token: `X </s>` or `A </s> B </s>`."""
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """T5 does not use token type ids, so a list of zeros is returned."""
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: re.search(r"<extra_id_\d+>", token) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
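# A short usage sketch (downloads the `t5-small` tokenizer on first run). T5 reserves
# 100 sentinel tokens <extra_id_0> ... <extra_id_99> at the top of the vocabulary,
# which `get_sentinel_tokens` filters out of the additional special tokens:
#
#   from transformers import T5TokenizerFast
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   print(tok("translate English to German: hello").input_ids)  # ends with eos id 1
#   print(sorted(tok.get_sentinel_tokens())[:3])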
def merge_sort(collection: list) -> list:
    """Pure implementation of merge sort: recursively split, then merge the sorted halves."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
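# A quick self-contained check of the merge step above: sorting [5, 2, 4, 1] splits
# into [5, 2] and [4, 1], sorts each half to [2, 5] and [1, 4], then merges them.
assert merge_sort([5, 2, 4, 1]) == [1, 2, 4, 5]
assert merge_sort([]) == []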
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
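# A short usage sketch for the early-exit classifier above. In the DeeBERT research
# code, an entropy threshold is set on the encoder; when an intermediate layer's
# prediction entropy falls below it, the model raises HighwayException, which the
# forward pass above catches to report the exit layer. Names are illustrative:
#
#   model = DeeRobertaForSequenceClassification.from_pretrained("roberta-base", num_labels=2)
#   model.roberta.encoder.set_early_exit_entropy(0.5)  # assumes the DeeBERT encoder API
#   model.eval()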
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
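# A minimal self-contained illustration of the `to_kwargs` behavior the first test
# checks: a KwargsHandler-style dataclass only reports fields that differ from their
# defaults, so Accelerate can forward exactly the overrides to the underlying PyTorch
# object. `_TinyHandler` is an illustrative stand-in, not the real KwargsHandler.
from dataclasses import dataclass, fields


@dataclass
class _TinyHandler:
    init_scale: float = 65536.0
    growth_factor: float = 2.0

    def to_kwargs(self):
        default = _TinyHandler()
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(default, f.name)
        }


assert _TinyHandler().to_kwargs() == {}
assert _TinyHandler(init_scale=1024.0).to_kwargs() == {"init_scale": 1024.0}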